Category: LINUX

2013-03-11 19:10:25

Asynchronous notification: fasync

Asynchronous notification (fasync) works together with the signal and sigaction system calls; below I will use the signal function. Simply put, signal() associates a signal with a function: every time that signal is received, the corresponding function is called.


1. What is asynchronous notification

Personally, I think asynchronous notification is similar to the interrupt mechanism. As in the example program below, when the condition of interest becomes true (for instance, new data has been written to the device and can be read), the driver sends a signal to the interested process to tell it that data is available; until then, the process is not blocked. This is unlike the blocking I/O and poll we studied earlier, where the process calls into the driver to check the condition and may block when the condition is not met.


2. Enabling asynchronous notification in user space

There are really only three steps:

1) signal(SIGIO, sig_handler);

Call signal() so that the signal SIGIO is associated with the handler function sig_handler.

2) fcntl(fd, F_SETOWN, getpid());

Designate a process as the "owner" of the file (recorded in filp->f_owner), so the kernel knows which process the signal should be sent to.

3) f_flags = fcntl(fd, F_GETFL);

fcntl(fd, F_SETFL, f_flags | FASYNC);

Set the FASYNC flag on the device file; this is what causes the driver's fasync method (the test_fasync function we will implement) to be invoked.

Once these three steps have been carried out, the process receives the signal whenever the driver raises it.
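As a side note, since the post mentions sigaction as well: a minimal sketch (my own, not part of the original steps) of the equivalent of step 1 using sigaction, assuming the same sig_handler as in the example below and that <signal.h> and <string.h> are included:

    /* sketch: register sig_handler for SIGIO with sigaction instead of signal() */
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = sig_handler;   /* same handler as in the example below */
    sigemptyset(&sa.sa_mask);      /* don't block extra signals in the handler */
    sa.sa_flags = 0;
    sigaction(SIGIO, &sa, NULL);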


The following is a user-space example using fasync:


#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/select.h>
#include <unistd.h>
#include <signal.h>

unsigned int flag;

void sig_handler(int sig)
{
    printf("%s\n", __FUNCTION__);
    flag = 1;
}

int main(void)
{
    char buf[20];
    int fd;
    int f_flags;
    flag = 0;

    fd = open("/dev/test", O_RDWR);
    if (fd < 0)
    {
        perror("open");
        return -1;
    }
    /* the three steps */
    signal(SIGIO, sig_handler);
    fcntl(fd, F_SETOWN, getpid());
    f_flags = fcntl(fd, F_GETFL);
    fcntl(fd, F_SETFL, FASYNC | f_flags);

    while (1)
    {
        printf("waiting\n"); /* keeps printing until the signal arrives */
        sleep(4);
        if (flag)
            break;
    }
    memset(buf, 0, sizeof(buf)); /* make sure buf is NUL-terminated before printing */
    read(fd, buf, 10);
    printf("finish: read[%s]\n", buf);
    close(fd);
    return 0;
}
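One note on the example: the while/sleep loop polls the flag every four seconds. A sketch of an alternative (my own variation, not from the original) is to block in pause() until a signal arrives; for data shared with a signal handler, flag is also better declared as volatile sig_atomic_t:

    /* sketch: wait for the signal instead of polling with sleep();
     * this ignores the small check-then-pause race, which sigsuspend() would close */
    while (!flag)
        pause();    /* returns after the SIGIO handler has run */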

3. What the driver needs to implement for asynchronous notification


Of the three steps above, the kernel already implements the first two for us; for the third step the driver only needs to do a little work, essentially passing a few parameters along.

To deliver an asynchronous notification, the kernel needs to know several things: which file (filp), which signal (SIGIO), which process to send it to (pid), and what to do when the signal arrives (sig_handler). All of this is set up by the steps above.

Recall that when using wait queues, we add a wait queue entry (wait_queue_t) to a specified wait queue head (wait_queue_head_t).

Here, similarly, we add a struct fasync_struct to what I will call the kernel's asynchronous queue head (the name is my own). This structure stores the information about the corresponding device file (fd, filp) and hands it over to the kernel to manage. Once the driver raises a notification, the kernel finds the matching file (fd) on this asynchronous queue, finds the owning process PID in filp->f_owner, and the registered sig_handler gets called.



struct fasync_struct {
    spinlock_t fa_lock;
    int magic;
    int fa_fd;
    struct fasync_struct *fa_next; /* singly linked list */
    struct file *fa_file;
    struct rcu_head fa_rcu;
};
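A few notes on the fields: fa_fd is the file descriptor that user space passed to fcntl(), fa_file points to the corresponding struct file, and fa_next chains the entries into the singly linked list headed by the driver's struct fasync_struct * pointer; fa_lock and fa_rcu protect the list while the kernel traverses it to deliver signals.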
Below is the scullp driver source from LDD3, followed by some analysis:


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk(), min() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/proc_fs.h>
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>

#include "scull.h"		/* local definitions */

struct scull_pipe {
        wait_queue_head_t inq, outq; /* read and write queues */
        char *buffer, *end; /* begin of buf, end of buf */
        int buffersize; /* used in pointer arithmetic */
        char *rp, *wp; /* where to read, where to write */
        int nreaders, nwriters; /* number of openings for r/w */
        struct fasync_struct *async_queue; /* asynchronous readers */
        struct semaphore sem; /* mutual exclusion semaphore */
        struct cdev cdev; /* Char device structure */
};

/* parameters */
static int scull_p_nr_devs = SCULL_P_NR_DEVS;	/* number of pipe devices */
int scull_p_buffer = SCULL_P_BUFFER;	/* buffer size */
dev_t scull_p_devno;			/* Our first device number */

module_param(scull_p_nr_devs, int, 0);	/* FIXME check perms */
module_param(scull_p_buffer, int, 0);

static struct scull_pipe *scull_p_devices;

static int scull_p_fasync(int fd, struct file *filp, int mode);
static int spacefree(struct scull_pipe *dev);
/*
 * Open and close
 */

static int scull_p_open(struct inode *inode, struct file *filp)
{
    struct scull_pipe *dev;

    dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
    filp->private_data = dev;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;
    if (!dev->buffer) {
        /* allocate the buffer */
        dev->buffer = kmalloc(scull_p_buffer, GFP_KERNEL);
        if (!dev->buffer) {
            up(&dev->sem);
            return -ENOMEM;
        }
    }
    dev->buffersize = scull_p_buffer;
    dev->end = dev->buffer + dev->buffersize;
    dev->rp = dev->wp = dev->buffer; /* rd and wr from the beginning */

    /* use f_mode, not f_flags: it's cleaner (fs/open.c tells why) */
    if (filp->f_mode & FMODE_READ)
        dev->nreaders++;
    if (filp->f_mode & FMODE_WRITE)
        dev->nwriters++;
    up(&dev->sem);

    return nonseekable_open(inode, filp);
}

static int scull_p_release(struct inode *inode, struct file *filp)
{
    struct scull_pipe *dev = filp->private_data;

    /* remove this filp from the asynchronously notified filp's */
    scull_p_fasync(-1, filp, 0);
    down(&dev->sem);
    if (filp->f_mode & FMODE_READ)
        dev->nreaders--;
    if (filp->f_mode & FMODE_WRITE)
        dev->nwriters--;
    if (dev->nreaders + dev->nwriters == 0) {
        kfree(dev->buffer);
        dev->buffer = NULL; /* the other fields are not checked on open */
    }
    up(&dev->sem);
    return 0;
}

/*
 * Data management: read and write
 */
static ssize_t scull_p_read (struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct scull_pipe *dev = filp->private_data;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;

    while (dev->rp == dev->wp) { /* nothing to read */
        up(&dev->sem); /* release the lock */
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        PDEBUG("\"%s\" reading: going to sleep\n", current->comm);
        if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
            return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
        /* otherwise loop, but first reacquire the lock */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    /* ok, data is there, return something */
    if (dev->wp > dev->rp)
        count = min(count, (size_t)(dev->wp - dev->rp));
    else /* the write pointer has wrapped, return data up to dev->end */
        count = min(count, (size_t)(dev->end - dev->rp));
    if (copy_to_user(buf, dev->rp, count)) {
        up (&dev->sem);
        return -EFAULT;
    }
    dev->rp += count;
    if (dev->rp == dev->end)
        dev->rp = dev->buffer; /* wrapped */
    up (&dev->sem);

    /* finally, awake any writers and return */
    wake_up_interruptible(&dev->outq);
    PDEBUG("\"%s\" did read %li bytes\n",current->comm, (long)count);
    return count;
}

/* Wait for space for writing; caller must hold device semaphore. On
 * error the semaphore will be released before returning. */
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
    while (spacefree(dev) == 0) { /* full */
        DEFINE_WAIT(wait);

        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        PDEBUG("\"%s\" writing: going to sleep\n",current->comm);
        prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
        if (spacefree(dev) == 0)
            schedule();
        finish_wait(&dev->outq, &wait);
        if (signal_pending(current))
            return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    return 0;
}

/* How much space is free? */
static int spacefree(struct scull_pipe *dev)
{
    if (dev->rp == dev->wp)
        return dev->buffersize - 1;
    return ((dev->rp + dev->buffersize - dev->wp) % dev->buffersize) - 1;
}

static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct scull_pipe *dev = filp->private_data;
    int result;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;

    /* Make sure there's space to write */
    result = scull_getwritespace(dev, filp);
    if (result)
        return result; /* scull_getwritespace called up(&dev->sem) */

    /* ok, space is there, accept something */
    count = min(count, (size_t)spacefree(dev));
    if (dev->wp >= dev->rp)
        count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
    else /* the write pointer has wrapped, fill up to rp-1 */
        count = min(count, (size_t)(dev->rp - dev->wp - 1));
    PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
    if (copy_from_user(dev->wp, buf, count)) {
        up (&dev->sem);
        return -EFAULT;
    }
    dev->wp += count;
    if (dev->wp == dev->end)
        dev->wp = dev->buffer; /* wrapped */
    up(&dev->sem);

    /* finally, awake any reader */
    wake_up_interruptible(&dev->inq); /* blocked in read() and select() */

    /* and signal asynchronous readers, explained late in chapter 5 */
    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
    PDEBUG("\"%s\" did write %li bytes\n",current->comm, (long)count);
    return count;
}

static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
    struct scull_pipe *dev = filp->private_data;
    unsigned int mask = 0;

    /*
     * The buffer is circular; it is considered full
     * if "wp" is right behind "rp" and empty if the
     * two are equal.
     */
    down(&dev->sem);
    poll_wait(filp, &dev->inq, wait);
    poll_wait(filp, &dev->outq, wait);
    if (dev->rp != dev->wp)
        mask |= POLLIN | POLLRDNORM;	/* readable */
    if (spacefree(dev))
        mask |= POLLOUT | POLLWRNORM;	/* writable */
    up(&dev->sem);
    return mask;
}

static int scull_p_fasync(int fd, struct file *filp, int mode)
{
    struct scull_pipe *dev = filp->private_data;
    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

/* FIXME this should use seq_file */
#ifdef SCULL_DEBUG
static void scullp_proc_offset(char *buf, char **start, off_t *offset, int *len)
{
    if (*offset == 0)
        return;
    if (*offset >= *len) {	/* Not there yet */
        *offset -= *len;
        *len = 0;
    }
    else {			/* We're into the interesting stuff now */
        *start = buf + *offset;
        *offset = 0;
    }
}

static int scull_read_p_mem(char *buf, char **start, off_t offset, int count,
        int *eof, void *data)
{
    int i, len;
    struct scull_pipe *p;

#define LIMIT (PAGE_SIZE-200)	/* don't print any more after this size */
    *start = buf;
    len = sprintf(buf, "Default buffersize is %i\n", scull_p_buffer);
    for(i = 0; i<scull_p_nr_devs && len <= LIMIT; i++) {
        p = &scull_p_devices[i];
        if (down_interruptible(&p->sem))
            return -ERESTARTSYS;
        len += sprintf(buf+len, "\nDevice %i: %p\n", i, p);
        len += sprintf(buf+len, " Buffer: %p to %p (%i bytes)\n", p->buffer, p->end, p->buffersize);
        len += sprintf(buf+len, " rp %p wp %p\n", p->rp, p->wp);
        len += sprintf(buf+len, " readers %i writers %i\n", p->nreaders, p->nwriters);
        up(&p->sem);
        scullp_proc_offset(buf, start, &offset, &len);
    }
    *eof = (len <= LIMIT);
    return len;
}
#endif

/*
 * The file operations for the pipe device
 * (some are overlayed with bare scull)
 */
struct file_operations scull_pipe_fops = {
    .owner =	THIS_MODULE,
    .llseek =	no_llseek,
    .read =	scull_p_read,
    .write =	scull_p_write,
    .poll =	scull_p_poll,
    .ioctl =	scull_ioctl,
    .open =	scull_p_open,
    .release =	scull_p_release,
    .fasync =	scull_p_fasync,
};

/*
 * Set up a cdev entry.
 */
static void scull_p_setup_cdev(struct scull_pipe *dev, int index)
{
    int err, devno = scull_p_devno + index;

    cdev_init(&dev->cdev, &scull_pipe_fops);
    dev->cdev.owner = THIS_MODULE;
    err = cdev_add (&dev->cdev, devno, 1);
    /* Fail gracefully if need be */
    if (err)
        printk(KERN_NOTICE "Error %d adding scullpipe%d", err, index);
}

/*
 * Initialize the pipe devs; return how many we did.
 */
int scull_p_init(dev_t firstdev)
{
    int i, result;

    result = register_chrdev_region(firstdev, scull_p_nr_devs, "scullp");
    if (result < 0) {
        printk(KERN_NOTICE "Unable to get scullp region, error %d\n", result);
        return 0;
    }
    scull_p_devno = firstdev;
    scull_p_devices = kmalloc(scull_p_nr_devs * sizeof(struct scull_pipe), GFP_KERNEL);
    if (scull_p_devices == NULL) {
        unregister_chrdev_region(firstdev, scull_p_nr_devs);
        return 0;
    }
    memset(scull_p_devices, 0, scull_p_nr_devs * sizeof(struct scull_pipe));
    for (i = 0; i < scull_p_nr_devs; i++) {
        init_waitqueue_head(&(scull_p_devices[i].inq));
        init_waitqueue_head(&(scull_p_devices[i].outq));
        init_MUTEX(&scull_p_devices[i].sem);
        scull_p_setup_cdev(scull_p_devices + i, i);
    }
#ifdef SCULL_DEBUG
    create_proc_read_entry("scullpipe", 0, NULL, scull_read_p_mem, NULL);
#endif
    return scull_p_nr_devs;
}

/*
 * This is called by cleanup_module or on failure.
 * It is required to never fail, even if nothing was initialized first
 */
void scull_p_cleanup(void)
{
    int i;

#ifdef SCULL_DEBUG
    remove_proc_entry("scullpipe", NULL);
#endif

    if (!scull_p_devices)
        return; /* nothing else to release */

    for (i = 0; i < scull_p_nr_devs; i++) {
        cdev_del(&scull_p_devices[i].cdev);
        kfree(scull_p_devices[i].buffer);
    }
    kfree(scull_p_devices);
    unregister_chrdev_region(scull_p_devno, scull_p_nr_devs);
    scull_p_devices = NULL; /* pedantic */
}

(1) A look at the function fasync_helper:

int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)

As you can see, its first three parameters are exactly the three parameters of scull_p_fasync; we just pass in our own fasync_struct pointer as well, and the kernel takes care of everything described above.


(2) When the condition of interest becomes true (in scullp, when new data has been written and is readable), the driver calls kill_fasync to send SIGIO to the registered processes.

A brief explanation of this function:

void kill_fasync(struct fasync_struct **fp, int sig, int band)

sig is the signal we want to send.

band is usually POLL_IN, meaning the device has become readable; if the device has become writable, use POLL_OUT.


(3) When the device is closed, the fasync_struct must be removed from the asynchronous queue:
scull_p_fasync(-1, filp, 0);

Removal also goes through scull_p_fasync, just with different arguments.
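Putting (1) through (3) together, the driver side boils down to three calls. Below is a minimal sketch (my own distillation, with hypothetical names my_async_queue, my_fasync, my_write, my_release; real data handling omitted), mirroring what scull_p_fasync, scull_p_write and scull_p_release do above:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/poll.h>     /* POLL_IN */

    static struct fasync_struct *my_async_queue;

    /* (1) called when user space sets or clears FASYNC with fcntl(F_SETFL) */
    static int my_fasync(int fd, struct file *filp, int on)
    {
        return fasync_helper(fd, filp, on, &my_async_queue);
    }

    /* (2) wherever new data becomes readable, notify the registered readers */
    static ssize_t my_write(struct file *filp, const char __user *buf,
                            size_t count, loff_t *ppos)
    {
        /* ... copy the data into the device buffer ... */
        if (my_async_queue)
            kill_fasync(&my_async_queue, SIGIO, POLL_IN);
        return count;
    }

    /* (3) on close, remove this filp from the asynchronous queue */
    static int my_release(struct inode *inode, struct file *filp)
    {
        my_fasync(-1, filp, 0);
        return 0;
    }

    static const struct file_operations my_fops = {
        .owner   = THIS_MODULE,
        .write   = my_write,
        .release = my_release,
        .fasync  = my_fasync,
    };

The only difference from scullp is that the fasync_struct pointer lives in a single global here instead of inside a per-device structure.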

4. Differences between blocking I/O, poll, and asynchronous notification

The most important differences:

1) Asynchronous notification does not block the process.

2) With blocking I/O, if the condition is not met, the process blocks inside the driver's read/write methods (test_read / test_write in our earlier examples).

3) With select/poll, if the condition is not met, the process blocks inside the system call.

"Asynchronous" means the process can go off and do other things before the signal arrives; when the signal comes, the kernel notifies the process and the corresponding handler runs. The process does not know in advance when the signal will arrive.


5. Summary

This post only covered how asynchronous notification is implemented in the kernel and what the corresponding user-space code and driver code need to do. Finally, it summarized the differences between blocking I/O, poll, and asynchronous notification.



