Using netlink to unify delivery of asynchronous peripheral events
      A development board has many peripherals that raise asynchronous events; this post is mainly about the ones triggered by the user. Such events generally have to be routed through the kernel to user space and handed to some handler program: headphone insertion (show the headphone icon, switch the audio output), microphone, AV/CVBS cable, SD card (mount the partition, which udev can handle), charger plug/unplug (show the charging state), the suspend button, and so on. The event capture, including the ISR and the code that drives the peripheral, lives in each individual driver. If every driver exported its own asynchronous-event interface to user space, an application would have to run one waiting thread per interface, because the common kernel-to-user channels (read/write/ioctl/proc/sysfs) are all device-file based, each tied to a different device file. Worse, every transfer is initiated by the application rather than pushed by the kernel, so the latency is comparatively poor.
      A while ago I read an article about netlink and realised it could unify this kernel-to-user interface. The basic idea: create a kernel thread that collects the events sent by the various device drivers (the module exports a function to drivers; the function puts the event on a queue and wakes the kernel thread), and broadcast them, in arrival order and by priority, to a specific netlink multicast group. Event-handling applications listen on that multicast group socket and process whatever the kernel pushes; because it is a broadcast, several handler programs can coexist. While writing the module I also added a lower-latency path, a direct-broadcast interface: if an event is raised in interrupt context the code first tries to broadcast it directly, and only if that fails (the allocation and netlink_broadcast must not sleep there) is the event put on the kernel thread's queue. A hypothetical driver-side caller is sketched below, then the module code; a user-space listener sketch follows at the end.
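To make the exported interface concrete, here is a hypothetical driver-side caller. The event ID and the interrupt handler are invented for illustration; only hw_event_send() and struct mxc_hw_event come from the module below.

/* hypothetical caller: a headphone-jack ISR reporting an insert event */
#include <linux/interrupt.h>

#include "mxc_hw_event.h"

#define HW_EVENT_HEADPHONE 1 /* invented event id, defined per board */

static irqreturn_t headphone_isr(int irq, void *dev_id)
{
    struct mxc_hw_event event = {
        .event = HW_EVENT_HEADPHONE,
        .args = 1, /* e.g. 1 = plugged, 0 = unplugged */
    };

    /* priority 0: try a direct broadcast from interrupt context;
     * hw_event_send() falls back to the queue if that fails */
    hw_event_send(0, event);

    return IRQ_HANDLED;
}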

/*!
 * mxc_hw_event.h
 * Collect the hardware events, send to user by netlink
 */


#ifndef _MXC_HW_EVENT_H
#define _MXC_HW_EVENT_H

#define HW_EVENT_GROUP 2

#ifdef __KERNEL__
#include <linux/list.h>
#endif

struct mxc_hw_event
{
    unsigned int event;
    int args;
};

#ifdef __KERNEL__
struct hw_event_elem
{
    struct mxc_hw_event event;
    struct list_head list;
};

/* exported to drivers: broadcast or queue a hardware event */
int hw_event_send(int priority, struct mxc_hw_event new_event);
#endif

#endif /* _MXC_HW_EVENT_H */



/*!
 * mxc_hw_event.c
 * Collect the hardware events, send to user by netlink
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <net/sock.h>

#include "mxc_hw_event.h"

#define EVENT_POOL_SIZE 10

static struct sock *nl_event_sock; /* netlink socket */
static struct list_head event_head;
static struct list_head free_head;
static struct hw_event_elem events_pool[EVENT_POOL_SIZE]; /* event pool */
static DECLARE_COMPLETION(exit_completion);
static DEFINE_SPINLOCK(list_lock);
static DECLARE_WAIT_QUEUE_HEAD(event_wq);
static unsigned int seq = 0; /* send seq */
static int running = 1;
static pid_t pid;

/*!
 * main HW event handler thread
 */

static int hw_event_thread(void *data)
{
    struct sk_buff *skb = NULL;
    struct nlmsghdr *nlh = NULL;
    unsigned int size;
    struct hw_event_elem *event, *n;
    LIST_HEAD(tmp_head);

    /* make this thread daemon, and assign thread name */
    daemonize("mxc_hw_event");

    while (running) {

        /* wait for an event to arrive */
        if (wait_event_interruptible(event_wq, !list_empty(&event_head))) {
            /* interrupted by a signal (also used to wake us at exit) */
            flush_signals(current);
            continue;
        }
       
        /* atomically move all queued events onto a private list */
        spin_lock_irq(&list_lock);
        list_splice_init(&event_head, &tmp_head);
        spin_unlock_irq(&list_lock);

        list_for_each_entry_safe(event, n, &tmp_head, list) {

            size = NLMSG_SPACE(sizeof(struct mxc_hw_event));
            skb = alloc_skb(size, GFP_KERNEL);
            if (!skb) {
                /* drop this event if the allocation fails */
                printk(KERN_WARNING "mxc_hw_event: alloc_skb() failed\n");
                goto alloc_failure;
            }

            /* put the netlink header struct to skb */
            nlh = NLMSG_PUT(skb, 0, seq++, NLMSG_DONE, size - sizeof(*nlh));

            /* fill the netlink data */
            memcpy((struct mxc_hw_event *)NLMSG_DATA(nlh), &event->event,
                    sizeof(struct mxc_hw_event));

            /* the payload is already copied into the skb, so the node
             * can go back to the free list before the broadcast */
            spin_lock_irq(&list_lock);
            list_move(&event->list, &free_head);
            spin_unlock_irq(&list_lock);

            /* from the kernel (pid 0) to the HW_EVENT_GROUP multicast group */
            NETLINK_CB(skb).pid = 0; /* sender pid */
            NETLINK_CB(skb).dst_group = HW_EVENT_GROUP;

            /* broadcast the event; netlink_broadcast() consumes the skb
             * whether or not delivery succeeds */
            netlink_broadcast(nl_event_sock, skb, 0, HW_EVENT_GROUP,
                    GFP_KERNEL);

            continue;
nlmsg_failure:
            /* NLMSG_PUT() jumps here when the skb lacks tailroom for the
             * header; the skb was never handed off, so free it here */
            printk(KERN_WARNING "mxc_hw_event: no tailroom for NLMSG in skb\n");
            kfree_skb(skb);
alloc_failure:
            /* drop the event and return its node to the free list */
            spin_lock_irq(&list_lock);
            list_move_tail(&event->list, &free_head);
            spin_unlock_irq(&list_lock);
        }
    }

    complete(&exit_completion);
    return 0;
}

/*!
 * hw_event_send() - broadcast or queue a hardware event
 *
 * @priority the event priority: REALTIME, EMERGENCY, NORMAL; 0 is the
 *           highest and is broadcast immediately, others are queued
 * @new_event the event to be sent
 *
 * Returns 0 if broadcast at once, 1 if queued, -1 if dropped.
 */

int hw_event_send(int priority, struct mxc_hw_event new_event)
{
    unsigned int size;
    struct sk_buff *skb = NULL;
    struct nlmsghdr *nlh = NULL;
    struct mxc_hw_event *event;
    struct hw_event_elem *event_elem;
    int ret;
    unsigned long flag;
    struct list_head *list_node;

    if (priority == 0) {
        /*
         * Highest-priority event: try to broadcast it immediately.
         */
        size = NLMSG_SPACE(sizeof(struct mxc_hw_event));

        /* alloc skb */
        if (in_interrupt()) {
            skb = alloc_skb(size, GFP_ATOMIC);
        } else {
            skb = alloc_skb(size, GFP_KERNEL);
        }
        if (!skb) {
            /* fall back to the queued path if the allocation fails */
            printk(KERN_WARNING "hw_event_send: alloc_skb() failed\n");
            goto send_later;
        }

        /* put the netlink header struct to skb */
        nlh = NLMSG_PUT(skb, 0, seq++, NLMSG_DONE, size - sizeof(*nlh));

        /* fill the netlink data */
        event = (struct mxc_hw_event *)NLMSG_DATA(nlh);
        memcpy(event, &new_event, sizeof(struct mxc_hw_event));

        /* from the kernel (pid 0) to the HW_EVENT_GROUP multicast group */
        NETLINK_CB(skb).pid = 0; /* sender pid */
        NETLINK_CB(skb).dst_group = HW_EVENT_GROUP;

        /*
         * Broadcast the event. netlink_broadcast() consumes the skb
         * whether or not delivery succeeds, so the skb must not be
         * freed again on error; just fall back to the queued path.
         */
        ret = netlink_broadcast(nl_event_sock, skb, 0, HW_EVENT_GROUP,
                in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
        if (ret)
            goto send_later;

        return 0;

nlmsg_failure:
        /* NLMSG_PUT() jumps here when the skb lacks tailroom; the skb
         * was never handed off, so free it and queue the event instead */
        kfree_skb(skb);
        goto send_later;
    }

send_later:
    spin_lock_irqsave(&list_lock, flag);
    if (list_empty(&free_head)) {
        spin_unlock_irqrestore(&list_lock, flag);
        /* no free event node left: drop the event */
        printk(KERN_WARNING "hw_event_send: no free event node\n");
        return -1;
    }

    /* take the first free node, fill it, and queue it */
    list_node = free_head.next;
    event_elem = list_entry(list_node, struct hw_event_elem, list);
    event_elem->event = new_event;
    /* list_move_tail() keeps the events in arrival order (FIFO) */
    list_move_tail(list_node, &event_head);
    spin_unlock_irqrestore(&list_lock, flag);

    /* wake the handler thread to deliver the queued event */
    wake_up(&event_wq);

    return 1; /* queued for deferred delivery */
}

static int __init mxc_hw_event_init(void)
{
    int i;

    /* initialise the event and free list heads */
    INIT_LIST_HEAD(&free_head);
    INIT_LIST_HEAD(&event_head);

    /* put every node of the static pool on the free list */
    for (i = 0; i < EVENT_POOL_SIZE; i++) {
        memset(&events_pool[i], 0, sizeof(struct hw_event_elem));
        list_add_tail(&events_pool[i].list, &free_head);
    }

    /* create the netlink kernel socket (send only, no input callback) */
    nl_event_sock = netlink_kernel_create(NETLINK_USERSOCK, 0, NULL,
                                          THIS_MODULE);
    if (!nl_event_sock) {
        printk(KERN_ERR "mxc_hw_event: failed to create netlink socket\n");
        return -ENOMEM;
    }

    /* start the event handler thread */
    pid = kernel_thread(hw_event_thread, NULL, CLONE_KERNEL);
    if (pid < 0) {
        sock_release(nl_event_sock->sk_socket);
        return pid;
    }

    return 0;
}

static void __exit mxc_hw_event_exit(void)
{
    struct task_struct *task;

    running = 0;
    /* kick the thread out of its interruptible wait */
    task = find_task_by_pid(pid);
    if (task)
        force_sig(SIGSTOP, task);
    else
        printk(KERN_WARNING "mxc_hw_event: thread (%d) already gone\n", pid);
    /* wait until the thread has finished */
    wait_for_completion(&exit_completion);
    sock_release(nl_event_sock->sk_socket);
}

module_init(mxc_hw_event_init);
module_exit(mxc_hw_event_exit);

EXPORT_SYMBOL(hw_event_send);
MODULE_LICENSE("GPL");
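On the user-space side a handler just subscribes to the multicast group and blocks on the socket; one thread now covers all peripherals. A minimal listener sketch, assuming mxc_hw_event.h is visible to user-space code (error handling abbreviated):

/* hw_event_listener.c - minimal user-space listener sketch */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#include "mxc_hw_event.h" /* HW_EVENT_GROUP, struct mxc_hw_event */

int main(void)
{
    char buf[256];
    struct sockaddr_nl addr;
    struct nlmsghdr *nlh;
    struct mxc_hw_event *event;
    int sock, len;

    sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
    if (sock < 0) {
        perror("socket");
        return 1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.nl_family = AF_NETLINK;
    addr.nl_pid = getpid();
    /* nl_groups is a bitmask: bit (n - 1) subscribes to group n */
    addr.nl_groups = 1 << (HW_EVENT_GROUP - 1);
    if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        return 1;
    }

    while ((len = recv(sock, buf, sizeof(buf), 0)) > 0) {
        /* each datagram carries one netlink message; walk it anyway */
        for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
             nlh = NLMSG_NEXT(nlh, len)) {
            event = (struct mxc_hw_event *)NLMSG_DATA(nlh);
            printf("event %u, args %d\n", event->event, event->args);
        }
    }

    return 0;
}

Because the kernel pushes each event as soon as it happens, the handler no longer polls or opens one file descriptor per driver, which was the point of the exercise.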
