Fading Coder

One Final Commit for the Last Sprint

Home > Tech > Content

Implementing Semaphores, Mutexes, and Concurrency Control in Linux Device Drivers

Tech 2

Semaphores: Blocking-Based Concurrency Control

Semaphores provide a synchronization mechanism for managing access to shared resources, particularly useful when critical sections involve longer execution times.

Semaphore Operations

// Define a semaphore
struct semaphore sync_sem;

// Initialize semaphore with a value
void semaphore_init(struct semaphore *sem, int initial_value);

// Acquire semaphore (P operation)
int semaphore_down(struct semaphore *sem); // Uninterruptible (deep) sleep
int semaphore_down_interruptible(struct semaphore *sem); // Interruptible (light) sleep; returns nonzero if interrupted by a signal

// Release semaphore (V operation)
void semaphore_up(struct semaphore *sem);

Application Context

Semaphores are suitable for synchronization between task contexts where critical section execution is relatively lengthy.

Example Implementation: Character Device Driver

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm/ioctl.h>
#include "chardev.h"

#define BUFFER_SIZE 100

/* Preferred static major number; replaced if dynamic allocation is used. */
int primary_device = 11;
/* First minor number of the device range. */
int secondary_device = 0;
/* Number of minor devices managed by this driver. */
int device_count = 1;

/* Per-device state: a FIFO byte buffer plus the synchronization objects
 * that coordinate blocking readers, writers, poll(), and SIGIO delivery. */
struct chardev_data {
    struct cdev device;              /* character device registration handle */
    char buffer[BUFFER_SIZE];        /* FIFO data storage */
    int data_length;                 /* bytes currently held in buffer */
    struct semaphore sync_lock;      /* protects buffer and data_length */
    wait_queue_head_t read_queue;    /* readers sleep here when buffer empty */
    wait_queue_head_t write_queue;   /* writers sleep here when buffer full */
    struct fasync_struct *async_obj; /* SIGIO subscriber list */
};
/* Single static device instance (device_count == 1). */
struct chardev_data global_device;

/* open(): locate the owning chardev_data from the inode's cdev and
 * stash it in the file for use by the other file operations. */
int device_open(struct inode *inode_ptr, struct file *file_ptr) {
    struct chardev_data *dev_data;

    dev_data = container_of(inode_ptr->i_cdev, struct chardev_data, device);
    file_ptr->private_data = dev_data;
    return 0;
}

/* release(): drop this file from the async (SIGIO) notification list
 * if it had registered via fasync. */
int device_close(struct inode *inode_ptr, struct file *file_ptr) {
    struct chardev_data *dev_data = file_ptr->private_data;

    if (dev_data->async_obj)
        fasync_helper(-1, file_ptr, 0, &dev_data->async_obj);
    return 0;
}

/*
 * read(): copy up to request_size bytes out of the device FIFO.
 * Blocks until data arrives unless O_NONBLOCK is set.
 *
 * Returns the number of bytes transferred, -EAGAIN when non-blocking
 * and no data is available, -ERESTARTSYS if the sleep was interrupted
 * by a signal, or -EFAULT if the copy to user space fails.
 */
ssize_t device_read(struct file *file_ptr, char __user *user_buf, size_t request_size, loff_t *position) {
    struct chardev_data *dev_data = (struct chardev_data *)file_ptr->private_data;
    int transfer_size = 0;
    int result = 0;

    semaphore_down(&dev_data->sync_lock);

    /*
     * Must be a loop, not a single check: between the wake-up and our
     * re-acquisition of the lock another reader may have drained the
     * buffer, so the condition has to be re-evaluated every time.
     */
    while (dev_data->data_length <= 0) {
        if (file_ptr->f_flags & O_NONBLOCK) {
            semaphore_up(&dev_data->sync_lock);
            printk("No data available in non-blocking mode\n");
            return -EAGAIN;
        }
        /* Release the lock before sleeping so writers can make progress. */
        semaphore_up(&dev_data->sync_lock);
        result = wait_event_interruptible(dev_data->read_queue, dev_data->data_length > 0);
        if (result) {
            return -ERESTARTSYS;
        }
        semaphore_down(&dev_data->sync_lock);
    }

    transfer_size = (request_size > dev_data->data_length) ? dev_data->data_length : request_size;
    result = copy_to_user(user_buf, dev_data->buffer, transfer_size);
    if (result) {
        semaphore_up(&dev_data->sync_lock);
        printk("User copy failed\n");
        return -EFAULT;
    }

    /* Shift the remaining bytes to the front of the FIFO. */
    memmove(dev_data->buffer, dev_data->buffer + transfer_size, dev_data->data_length - transfer_size);
    dev_data->data_length -= transfer_size;
    semaphore_up(&dev_data->sync_lock);

    /* Space was freed: let any blocked writers retry. */
    wake_up_interruptible(&dev_data->write_queue);
    return transfer_size;
}

/*
 * write(): append up to request_size bytes to the device FIFO.
 * Blocks until space is available unless O_NONBLOCK is set.
 *
 * Returns the number of bytes transferred, -EAGAIN when non-blocking
 * and the buffer is full, -ERESTARTSYS if the sleep was interrupted
 * by a signal, or -EFAULT if the copy from user space fails.
 */
ssize_t device_write(struct file *file_ptr, const char __user *user_buf, size_t request_size, loff_t *position) {
    struct chardev_data *dev_data = (struct chardev_data *)file_ptr->private_data;
    int transfer_size = 0;
    int result = 0;

    semaphore_down(&dev_data->sync_lock);

    /*
     * Loop, not a single check: another writer may have refilled the
     * buffer between our wake-up and re-taking the lock, so the space
     * condition must be re-evaluated after every sleep.
     */
    while (dev_data->data_length >= BUFFER_SIZE) {
        if (file_ptr->f_flags & O_NONBLOCK) {
            semaphore_up(&dev_data->sync_lock);
            printk("Buffer full in non-blocking mode\n");
            return -EAGAIN;
        }
        /* Release the lock before sleeping so readers can drain data. */
        semaphore_up(&dev_data->sync_lock);
        result = wait_event_interruptible(dev_data->write_queue, dev_data->data_length < BUFFER_SIZE);
        if (result) {
            return -ERESTARTSYS;
        }
        semaphore_down(&dev_data->sync_lock);
    }

    transfer_size = (request_size > BUFFER_SIZE - dev_data->data_length) ? BUFFER_SIZE - dev_data->data_length : request_size;
    result = copy_from_user(dev_data->buffer + dev_data->data_length, user_buf, transfer_size);
    if (result) {
        semaphore_up(&dev_data->sync_lock);
        printk("User copy failed\n");
        return -EFAULT;
    }

    dev_data->data_length += transfer_size;
    semaphore_up(&dev_data->sync_lock);

    /* Data arrived: wake blocked readers and notify SIGIO subscribers. */
    wake_up_interruptible(&dev_data->read_queue);
    if (dev_data->async_obj != NULL) {
        kill_fasync(&dev_data->async_obj, SIGIO, POLL_IN);
    }
    return transfer_size;
}

long device_ioctl(struct file *file_ptr, unsigned int command, unsigned long argument) {
    struct chardev_data *dev_data = (struct chardev_data *)file_ptr->private_data;
    int __user *user_result = (int *)argument;
    int max_buffer = BUFFER_SIZE;
    int result = 0;

    switch(command) {
        case CHARDEV_GET_MAX_SIZE:
            result = copy_to_user(user_result, &max_buffer, sizeof(int));
            if(result) {
                printk("User copy failed\n");
                return -1;
            }
            break;
        case CHARDEV_GET_CURRENT_SIZE:
            semaphore_down(&dev_data->sync_lock);
            result = copy_to_user(user_result, &dev_data->data_length, sizeof(int));
            semaphore_up(&dev_data->sync_lock);
            if(result) {
                printk("User copy failed\n");
                return -1;
            }
            break;
        default:
            printk("Unknown command\n");
            return -1;
    }
    return 0;
}

/* poll(): report readiness — readable when the FIFO holds data,
 * writable when it has free space. */
unsigned int device_poll(struct file *file_ptr, poll_table *poll_table_ptr) {
    struct chardev_data *dev_data = file_ptr->private_data;
    unsigned int events = 0;
    int length;

    /* Register on both queues so either direction can wake the poller. */
    poll_wait(file_ptr, &dev_data->read_queue, poll_table_ptr);
    poll_wait(file_ptr, &dev_data->write_queue, poll_table_ptr);

    /* Snapshot the fill level under the lock, then derive the mask. */
    semaphore_down(&dev_data->sync_lock);
    length = dev_data->data_length;
    semaphore_up(&dev_data->sync_lock);

    if (length > 0)
        events |= POLLIN | POLLRDNORM;
    if (length < BUFFER_SIZE)
        events |= POLLOUT | POLLWRNORM;
    return events;
}

/* fasync(): add or remove this file on the SIGIO subscriber list;
 * the kernel helper does all the bookkeeping. */
int device_fasync(int fd, struct file *file_ptr, int mode) {
    struct chardev_data *dev_data = file_ptr->private_data;

    return fasync_helper(fd, file_ptr, mode, &dev_data->async_obj);
}

/* File-operation dispatch table registered via cdev_init(). */
struct file_operations device_ops = {
    .owner = THIS_MODULE,            /* pin module while the device is open */
    .open = device_open,
    .read = device_read,
    .write = device_write,
    .unlocked_ioctl = device_ioctl,
    .poll = device_poll,
    .fasync = device_fasync,
};

/*
 * Module init: reserve a device number (static major first, dynamic
 * fallback), initialize the device's synchronization objects, then
 * register the cdev. Returns 0 on success or a negative errno.
 */
int __init device_init(void) {
    int result = 0;
    dev_t device_number = MKDEV(primary_device, secondary_device);

    /* Try the preferred static major; fall back to dynamic allocation. */
    result = register_chrdev_region(device_number, device_count, "chardev");
    if(result) {
        result = alloc_chrdev_region(&device_number, secondary_device, device_count, "chardev");
        if(result) {
            printk("Device number allocation failed\n");
            return result;
        }
        primary_device = MAJOR(device_number);
    }

    /*
     * Initialize the wait queues and semaphore BEFORE cdev_add(): the
     * device is live the moment cdev_add() returns, so open()/read()
     * could otherwise race against uninitialized primitives.
     */
    init_waitqueue_head(&global_device.read_queue);
    init_waitqueue_head(&global_device.write_queue);
    semaphore_init(&global_device.sync_lock, 1);

    cdev_init(&global_device.device, &device_ops);
    global_device.device.owner = THIS_MODULE;
    result = cdev_add(&global_device.device, device_number, device_count);
    if(result) {
        printk("cdev_add failed\n");
        /* Undo the region reservation so the number is not leaked. */
        unregister_chrdev_region(device_number, device_count);
        return result;
    }
    return 0;
}

/* Module exit: tear down in reverse order of creation —
 * unregister the cdev first, then release the device-number region. */
void __exit device_exit(void) {
    dev_t devno = MKDEV(primary_device, secondary_device);

    printk("Exiting device driver\n");
    cdev_del(&global_device.device);
    unregister_chrdev_region(devno, device_count);
}

MODULE_LICENSE("GPL"); /* GPL license required for the exported symbols used above */
module_init(device_init); /* entry point run at insmod */
module_exit(device_exit); /* entry point run at rmmod */

Mutexes: Blocking-Based Mutual Exclusion

Mutexes provide exclusive access to shared resources, ensuring only one thread can execute a critical section at a time.

Mutex Operations

// Define and initialize mutex
struct mutex resource_lock;
mutex_init(&resource_lock);

// Acquire mutex
void mutex_lock(struct mutex *lock);

// Release mutex
void mutex_unlock(struct mutex *lock);

Implementation follows similar patterns to semaphores but with simpler semantics for mutual exclusion.

Concurrency Control Selection Principles

  • Use busy-waiting mechanisms for contexts where sleeping is prohibited; blocking mechanisms are appropriate for contexts that allow sleeping. In interrupt contexts accessing shared resources, always use busy-waiting approaches.
  • For longer critical section operations, prefer blocking mechanisms; for very short critical sections, consider busy-waiting approaches.
  • Disable interrupts only when sharing resources with interrupt contexts.
  • Use atomic variables for simple integer shared resources.

Related Articles

Understanding Strong and Weak References in Java

Strong References Strong references are the most prevalent type of object referencing in Java. When an object has a strong reference pointing to it, the garbage collector will not reclaim its memory. F...

Comprehensive Guide to SSTI Explained with Payload Bypass Techniques

Introduction Server-Side Template Injection (SSTI) is a vulnerability in web applications where user input is improperly handled within the template engine and executed on the server. This exploit can r...

Implement Image Upload Functionality for Django Integrated TinyMCE Editor

Django’s Admin panel is highly user-friendly, and pairing it with TinyMCE, an effective rich text editor, simplifies content management significantly. Combining the two is particularly useful for bloggi...

Leave a Comment

Anonymous

◎Feel free to join the discussion and share your thoughts.