
Category: LINUX

2022-06-04 14:26:30

vhost front end and back end (vhost_net/virtio_net): the forwarding flow in detail

——lvyilong316

Note: this series was written some years ago while reading the Linux kernel 3.10 source. It is posted here for easy reference and shared for anyone who may need it.
  
The point of vhost-net is to avoid an extra round trip through qemu (and the scheduling that entails) on the host: the VM's packets can be forwarded entirely inside the host kernel, which improves performance.

vhost_net initialization

vhost-net lives in the kernel as a miscdevice. Linux groups assorted devices that fit no other class under "misc devices", described by struct miscdevice. All misc devices share the major number MISC_MAJOR (10) and differ only in their minor numbers; they are linked into a single list, and when one is accessed the kernel looks it up by minor number and dispatches through the file_operations it registered. For vhost-net that file_operations is:

  1. static const struct file_operations vhost_net_fops = {
  2.     .owner = THIS_MODULE,
  3.     .release = vhost_net_release,
  4.     .unlocked_ioctl = vhost_net_ioctl,
  5. #ifdef CONFIG_COMPAT
  6.     .compat_ioctl = vhost_net_compat_ioctl,
  7. #endif
  8.     .open = vhost_net_open,
  9.     .llseek = noop_llseek,
  10. };
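
For reference, here is a minimal, hedged sketch of how such a misc device gets registered (the demo_* names are illustrative, not taken from vhost-net; the real driver registers a miscdevice named "vhost-net" bound to vhost_net_fops in essentially the same way):

    #include <linux/module.h>
    #include <linux/miscdevice.h>
    #include <linux/fs.h>

    /* Illustrative only: a miscdevice ties a name under /dev to a set of
     * file_operations; the misc core manages the shared major number. */
    static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .llseek = noop_llseek,
    };

    static struct miscdevice demo_misc = {
        .minor = MISC_DYNAMIC_MINOR,   /* let the misc core pick a minor number */
        .name  = "demo-misc",          /* shows up as /dev/demo-misc */
        .fops  = &demo_fops,
    };

    static int __init demo_init(void)
    {
        return misc_register(&demo_misc);
    }

    static void __exit demo_exit(void)
    {
        misc_deregister(&demo_misc);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");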

On the qemu side, when qemu creates a tap device it reaches net_init_tap(), which checks whether vhost=on was specified. If so, vhost_net_init() is called: it opens the vhost-net driver with open("/dev/vhost-net", O_RDWR) and then performs a series of ioctl(vhost_fd, ...) calls to initialize it. The open() lands in the driver's vhost_net_fops->open callback, i.e. vhost_net_open.
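
As a hedged, purely illustrative userspace sketch (not qemu's actual code), that setup sequence looks roughly as follows; the ioctls are the real ones from <linux/vhost.h>, everything else (names, ring size, error handling) is an assumption:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Rough shape of what qemu's vhost_net_init() does; error handling trimmed. */
    int vhost_backend_setup(void)
    {
        uint64_t features;
        struct vhost_vring_state state = { .index = 0, .num = 256 };

        int vhost_fd = open("/dev/vhost-net", O_RDWR);  /* -> vhost_net_open() in the kernel */

        ioctl(vhost_fd, VHOST_SET_OWNER);                /* create the vhost worker thread */
        ioctl(vhost_fd, VHOST_GET_FEATURES, &features);  /* negotiate virtio features */
        ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
        ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state);    /* ring size for queue 0 */
        /* ... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_ADDR/KICK/CALL and
         *     VHOST_NET_SET_BACKEND follow; see the paths below ... */
        return vhost_fd;
    }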

vhost_net_open

  1. vhost_net_open
  2. static int vhost_net_open(struct inode *inode, struct file *f)
  3. {
  4.     struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
  5.     struct vhost_dev *dev;
  6.     struct vhost_virtqueue **vqs;
  7.     int r, i;

  8.     if (!n)
  9.         return -ENOMEM;
  10.     vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
  11.     if (!vqs) {
  12.         kfree(n);
  13.         return -ENOMEM;
  14.     }

  15.     dev = &n->dev;
  16.     /*初始化vq*/
  17.     vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
  18.     vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
  19.     n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
  20.     n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
  21.     for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
  22.         n->vqs[i].ubufs = NULL;
  23.         n->vqs[i].ubuf_info = NULL;
  24.         n->vqs[i].upend_idx = 0;
  25.         n->vqs[i].done_idx = 0;
  26.         n->vqs[i].vhost_hlen = 0;
  27.         n->vqs[i].sock_hlen = 0;
  28.     }
  29.     /*初始化vhost-net的vhost_dev 成员*/
  30.     r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
  31.     if (r < 0) {
  32.         kfree(n);
  33.         kfree(vqs);
  34.         return r;
  35.     }
  36.     /* 初始化vhost_net上的vhost_poll结构,将handle_tx_net和handle_rx_net分别注册在vhost_net->poll->work上 */
  37.     vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
  38.     vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
  39.     /*赋值给open /dev/vhost-net得到的file结构的private_data字段*/
  40.     f->private_data = n;

  41.     return 0;
  42. }

The full call path of vhost_net_open is shown in the figure below; its main job is to create the vhost_net structure and initialize its members.

Let's go through these functions one by one. vhost_dev_init is responsible for initializing the vhost_dev member of vhost_net.

vhost_dev_init

  1. vhost_dev_init
  2. long vhost_dev_init(struct vhost_dev *dev,
  3.          struct vhost_virtqueue **vqs, int nvqs)
  4. {
  5.     int i;
  6.     /*将vq和vhost_dev关联*/
  7.     dev->vqs = vqs;
  8.     dev->nvqs = nvqs; //vqueue的数量(2)
  9.     mutex_init(&dev->mutex);
  10.     dev->log_ctx = NULL;
  11.     dev->log_file = NULL;
  12.     dev->memory = NULL;
  13.     dev->mm = NULL;
  14.     spin_lock_init(&dev->work_lock);
  15.     INIT_LIST_HEAD(&dev->work_list);
  16.     dev->worker = NULL;

  17.     for (i = 0; i < dev->nvqs; ++i) {
  18.         dev->vqs[i]->log = NULL;
  19.         dev->vqs[i]->indirect = NULL;
  20.         dev->vqs[i]->heads = NULL;
  21.         dev->vqs[i]->dev = dev;
  22.         mutex_init(&dev->vqs[i]->mutex);
  23.         vhost_vq_reset(dev, dev->vqs[i]);
  24.         if (dev->vqs[i]->handle_kick)
  25.             /*初始化vhost-dev的vhost_poll结构*/
  26.             vhost_poll_init(&dev->vqs[i]->poll,
  27.                     dev->vqs[i]->handle_kick, POLLIN, dev);
  28.     }

  29.     return 0;
  30. }

It in turn calls vhost_poll_init to initialize the vhost_poll structure embedded in each struct vhost_virtqueue.

vhost_poll_init

  1. vhost_poll_init
  2. void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  3.          unsigned long mask, struct vhost_dev *dev)
  4. {
  5.     /* 将vhost_poll_wakeup注册给poll->wait->func */
  6.     init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  7.     init_poll_funcptr(&poll->table, vhost_poll_func);
  8.     poll->mask = mask;
  9.     poll->dev = dev;
  10.     poll->wqh = NULL;
  11.     /* 将传入的fn即handle_tx_kick或handle_rx_kick注册给poll->work->fn */
  12.     vhost_work_init(&poll->work, fn);
  13. }

Now back to vhost_net_open, which then calls vhost_poll_init twice more:

vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);

vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

These two calls initialize the two vhost_poll members of vhost_net itself (one for receive, one for transmit). Since the same vhost_poll_init is used, the setup is identical to the vhost_poll inside vhost_virtqueue; only the arguments differ, so here the vhost_poll->work->fn callbacks become handle_rx_net/handle_tx_net rather than handle_rx_kick/handle_tx_kick.

The figure below shows the data-structure relationships created once initialization is complete. (Green and purple mark the receive and transmit queue structures respectively; for space reasons only the receive-queue side is drawn in full.)

Data path

Now let's look at how vhost moves data. We take the two directions separately: first the VM's receive direction, then its transmit direction.

Host → Guest

Delivering a packet from the host into the VM can be broken down along the following three call paths. All of the analysis below leans on the data-structure relationship diagram shown here.

The upper half of the diagram is the vhost device, i.e. the result of opening /dev/vhost-net; below it is the associated tap device.

Path 1

Let's start with the first call path.

After qemu creates the backend device, that is, after it has opened vhost-net, it issues an ioctl with the VHOST_NET_SET_BACKEND command. The vhost handler for this command is vhost_net_set_backend.
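
From userspace this step looks roughly like the hedged sketch below (tap_fd and vhost_fd are assumed to already exist; struct vhost_vring_file and VHOST_NET_SET_BACKEND are the real uapi from <linux/vhost.h>):

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Attach an already-open tap fd as the backend of virtqueue 0 (the RX queue). */
    int attach_backend(int vhost_fd, int tap_fd)
    {
        struct vhost_vring_file backend = {
            .index = 0,        /* which virtqueue */
            .fd    = tap_fd,   /* the tap device's fd */
        };
        /* handled in the kernel by vhost_net_set_backend() below */
        return ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
    }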

vhost_net_set_backend

  1. /*这里传入的fd是qemu传入的打开后端tap设备*/
  2. static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
  3. {
  4.     struct socket *sock, *oldsock;
  5.     struct vhost_virtqueue *vq;
  6.     struct vhost_net_virtqueue *nvq;
  7.     struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
  8.     int r;

  9.     mutex_lock(&n->dev.mutex);
  10.     r = vhost_dev_check_owner(&n->dev);
  11.     if (r)
  12.         goto err;

  13.     if (index >= VHOST_NET_VQ_MAX) {
  14.         r = -ENOBUFS;
  15.         goto err;
  16.     }
  17.     vq = &n->vqs[index].vq;
  18.     nvq = &n->vqs[index];
  19.     mutex_lock(&vq->mutex);

  20.     /* Verify that ring has been setup correctly. */
  21.     if (!vhost_vq_access_ok(vq)) {
  22.         r = -EFAULT;
  23.         goto err_vq;
  24.     }
  25.     sock = get_socket(fd);
  26.     if (IS_ERR(sock)) {
  27.         r = PTR_ERR(sock);
  28.         goto err_vq;
  29.     }

  30.     /* start polling new socket */
  31.     oldsock = rcu_dereference_protected(vq->private_data,
  32.                      lockdep_is_held(&vq->mutex));
  33.     if (sock != oldsock) {/*如果之前vq关联的sock和当前tap设备的sock不是同一个*/
  34.         ubufs = vhost_net_ubuf_alloc(vq,
  35.                      sock && vhost_sock_zcopy(sock));
  36.         if (IS_ERR(ubufs)) {
  37.             r = PTR_ERR(ubufs);
  38.             goto err_ubufs;
  39.         }
  40. /*停掉vq的接收工作,即将vhost_net->poll->wait移除等待队列*/
  41.         vhost_net_disable_vq(n, vq);
  42.         rcu_assign_pointer(vq->private_data, sock); /*将当前新的tap设备的sock赋值给vq的private_data*/
  43.         r = vhost_init_used(vq);
  44.         if (r)
  45.             goto err_used;
  46.         r = vhost_net_enable_vq(n, vq);/*enable vq,后文分析*/
  47.         if (r)
  48.             goto err_used;

  49.         oldubufs = nvq->ubufs;
  50.         nvq->ubufs = ubufs;

  51.         n->tx_packets = 0;
  52.         n->tx_zcopy_err = 0;
  53.         n->tx_flush = false;
  54.     }

  55.     mutex_unlock(&vq->mutex);

  56.     if (oldubufs) {
  57.         vhost_net_ubuf_put_wait_and_free(oldubufs);
  58.         mutex_lock(&vq->mutex);
  59.         vhost_zerocopy_signal_used(n, vq);
  60.         mutex_unlock(&vq->mutex);
  61.     }

  62.     if (oldsock) {
  63.         vhost_net_flush_vq(n, index);
  64.         fput(oldsock->file);
  65.     }

  66.     mutex_unlock(&n->dev.mutex);
  67.     return 0;
  68. }

Now let's see how vhost_net_enable_vq enables the vq.

vhost_net_enable_vq

  1. static int vhost_net_enable_vq(struct vhost_net *n,
  2.                 struct vhost_virtqueue *vq)
  3. {
  4.     struct vhost_net_virtqueue *nvq =
  5.         container_of(vq, struct vhost_net_virtqueue, vq);
  6.     /*获取vhost_net 对应的vhost_poll结构*/
  7.     struct vhost_poll *poll = n->poll + (nvq - n->vqs);
  8.     struct socket *sock;

  9.     sock = rcu_dereference_protected(vq->private_data,
  10.                      lockdep_is_held(&vq->mutex));
  11.     if (!sock)
  12.         return 0;

  13.     return vhost_poll_start(poll, sock->file);
  14. }

If a socket is attached to the vq, vhost_net_enable_vq calls vhost_poll_start(poll, sock->file) to start polling the tap device's socket.
vhost_poll_start

  1. /* Start polling a file. We add ourselves to file's wait queue. The caller must
  2.  * keep a reference to a file until after vhost_poll_stop is called. */
  3. int vhost_poll_start(struct vhost_poll *poll, struct file *file)
  4. {
  5.     unsigned long mask;
  6.     int ret = 0;

  7.     if (poll->wqh)
  8.         return 0;
  9.     /*这里的poll函数也就是tap设备的poll函数*/
  10.     mask = file->f_op->poll(file, &poll->table);
  11.     if (mask)
  12.         vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  13.     if (mask & POLLERR) {
  14.         if (poll->wqh)
  15.             remove_wait_queue(poll->wqh, &poll->wait);
  16.         ret = -EINVAL;
  17.     }

  18.     return ret;
  19. }
The tap device's poll function is tun_chr_poll, whose main job here is to call poll_wait(file, &tfile->wq.wait, wait):


  1. static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
  2. {
  3.     if (p && p->_qproc && wait_address)
  4.         p->_qproc(filp, wait_address, p);
  5. }
poll_wait in turn invokes the _qproc callback of the poll_table that was passed in. For vhost_poll's poll_table this callback is vhost_poll_func.

vhost_poll_func

  1. static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  2.              poll_table *pt)
  3. {
  4.     struct vhost_poll *poll;

  5.     poll = container_of(pt, struct vhost_poll, table);
  6.     poll->wqh = wqh;
  7.     add_wait_queue(wqh, &poll->wait);
  8. }
As you can see, this function hangs vhost_net->poll->wait on the wait queue of the tap device's struct tun_file.
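
The generic kernel pattern behind this is worth spelling out. Below is a minimal, hedged sketch (demo_* names are illustrative) of a wait-queue entry with a custom wakeup callback instead of a sleeping task, which is exactly what vhost_poll sets up and what the tap device later fires:

    #include <linux/wait.h>
    #include <linux/poll.h>
    #include <linux/kernel.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wqh);   /* plays the role of tfile->wq.wait */
    static wait_queue_t demo_wait;              /* plays the role of vhost_poll->wait */

    /* custom wakeup callback; vhost_poll_wakeup() queues work for the vhost thread here */
    static int demo_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
    {
        pr_info("woken with key mask 0x%lx\n", (unsigned long)key);
        return 0;
    }

    static void demo_arm(void)
    {
        /* what vhost_poll_init() + vhost_poll_func() accomplish together */
        init_waitqueue_func_entry(&demo_wait, demo_wakeup);
        add_wait_queue(&demo_wqh, &demo_wait);
    }

    static void demo_fire(void)
    {
        /* what tun_net_xmit() does once a packet is queued */
        wake_up_interruptible_poll(&demo_wqh, POLLIN);
    }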

Path 2

Now for the second call path, shown below.

When the tap device transmits a packet, the ndo_start_xmit callback of its net_device is invoked; for a tap device that is tun_net_xmit.

tun_net_xmit

  1. /* Net device start xmit */
  2. static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
  3. {
  4.     struct tun_struct *tun = netdev_priv(dev);
  5.     int txq = skb->queue_mapping;
  6.     struct tun_file *tfile;

  7.     rcu_read_lock();
  8.     tfile = rcu_dereference(tun->tfiles[txq]);

  9.     /* Drop packet if interface is not attached */
  10.     if (txq >= tun->numqueues)
  11.         goto drop;

  12.     tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

  13.     BUG_ON(!tfile);

  14.     /* Drop if the filter does not like it.
  15.      * This is a noop if the filter is disabled.
  16.      * Filter can be enabled only for the TAP devices. */
  17.     if (!check_filter(&tun->txflt, skb))
  18.         goto drop;

  19.     if (tfile->socket.sk->sk_filter &&
  20.      sk_filter(tfile->socket.sk, skb))
  21.         goto drop;

  22.     /* Limit the number of packets queued by dividing txq length with the
  23.      * number of queues.
  24.      */
  25.     if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
  26.             >= dev->tx_queue_len / tun->numqueues)
  27.         goto drop;

  28.     /* Orphan the skb - required as we might hang on to it
  29.      * for indefinite time. */
  30.     if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
  31.         goto drop;
  32.     skb_orphan(skb);

  33.     nf_reset(skb);

  34.     /*将数据包放入tap设备关联的队列中*/
  35.     /* Enqueue packet */
  36.     skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

  37.     /* Notify and wake up reader process */
  38.     if (tfile->flags & TUN_FASYNC)
  39.         kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
  40.     /*唤醒在tap设备等待队列等待的进程*/
  41.     wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
  42.                  POLLRDNORM | POLLRDBAND);

  43.     rcu_read_unlock();
  44.     return NETDEV_TX_OK;

  45. drop:
  46.     dev->stats.tx_dropped++;
  47.     skb_tx_error(skb);
  48.     kfree_skb(skb);
  49.     rcu_read_unlock();
  50.     return NETDEV_TX_OK;
  51. }
Here wake_up_interruptible_poll goes on to call the func registered on each wait_queue_t (a wait-queue node). That node is exactly vhost_poll->wait, whose func was set to vhost_poll_wakeup during vhost_net initialization.
vhost_poll_wakeup


  1. static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  2.              void *key)
  3. {
  4.     struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

  5.     if (!((unsigned long)key & poll->mask))
  6.         return 0;

  7.     vhost_poll_queue(poll);
  8.     return 0;
  9. }
which in turn calls vhost_poll_queue.

vhost_poll_queue

  1. void vhost_poll_queue(struct vhost_poll *poll)
  2. {
  3.     vhost_work_queue(poll->dev, &poll->work);
  4. }
vhost_work_queue

  1. void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
  2. {
  3.     unsigned long flags;

  4.     spin_lock_irqsave(&dev->work_lock, flags);
  5.     if (list_empty(&work->node)) {
  6.         /*将 vhost_poll->work添加到vhost_dev->work_list*/
  7.         list_add_tail(&work->node, &dev->work_list);
  8.         work->queue_seq++;
  9.         wake_up_process(dev->worker);
  10.     }
  11.     spin_unlock_irqrestore(&dev->work_lock, flags);
  12. }
So this adds vhost_poll->work, i.e. the vhost_work structure, to vhost_dev->work_list.

Path 3

Now the third call path.

After qemu has opened /dev/vhost-net it also issues an ioctl with the VHOST_SET_OWNER command, whose main purpose is to create the backend vhost kernel thread. The kernel handler is vhost_net_set_owner, which in turn calls vhost_dev_set_owner.

vhost_dev_set_owner

  1. long vhost_dev_set_owner(struct vhost_dev *dev)
  2. {
  3.     struct task_struct *worker;
  4.     int err;

  5.     /* Is there an owner already? */
  6.     if (vhost_dev_has_owner(dev)) {
  7.         err = -EBUSY;
  8.         goto err_mm;
  9.     }

  10.     /* No owner, become one */
  11.     dev->mm = get_task_mm(current);
  12.     worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
  13.     if (IS_ERR(worker)) {
  14.         err = PTR_ERR(worker);
  15.         goto err_worker;
  16.     }

  17.     dev->worker = worker;
  18.     wake_up_process(worker);    /* avoid contributing to loadavg */

  19.     err = vhost_attach_cgroups(dev);
  20.     if (err)
  21.         goto err_cgroup;

  22.     err = vhost_dev_alloc_iovecs(dev);
  23.     if (err)
  24.         goto err_cgroup;

  25.     return 0;
  26. }
vhost_dev_set_owner mainly creates a kthread named vhost-&lt;pid&gt;; its thread function is vhost_worker.
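
As a side note, here is a minimal, hedged sketch of the kthread lifecycle this follows (illustrative names; the real loop is vhost_worker, shown next):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int demo_worker(void *data)
    {
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
            schedule();                     /* sleep until someone queues work and wakes us */
            /* ... drain a work list and call work->fn(), as vhost_worker does ... */
            set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    static struct task_struct *demo_start(void)
    {
        struct task_struct *t = kthread_create(demo_worker, NULL, "demo-%d", current->pid);

        if (!IS_ERR(t))
            wake_up_process(t);             /* like vhost_dev_set_owner() */
        return t;
    }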

vhost_worker

  1. static int vhost_worker(void *data)
  2. {
  3.     struct vhost_dev *dev = data;
  4.     struct vhost_work *work = NULL;
  5.     unsigned uninitialized_var(seq);
  6.     mm_segment_t oldfs = get_fs();

  7.     set_fs(USER_DS);
  8.     use_mm(dev->mm);

  9.     for (;;) {
  10.         /* mb paired w/ kthread_stop */
  11.         set_current_state(TASK_INTERRUPTIBLE);

  12.         spin_lock_irq(&dev->work_lock);
  13.         if (work) {
  14.             work->done_seq = seq;
  15.             if (work->flushing)
  16.                 wake_up_all(&work->done);
  17.         }

  18.         if (kthread_should_stop()) {
  19.             spin_unlock_irq(&dev->work_lock);
  20.             __set_current_state(TASK_RUNNING);
  21.             break;
  22.         }
  23.         /*判断dev->work_list是否为NULL*/
  24.         if (!list_empty(&dev->work_list)) {
  25.             work = list_first_entry(&dev->work_list,
  26.                         struct vhost_work, node);
  27.             list_del_init(&work->node);
  28.             seq = work->queue_seq;
  29.         } else
  30.             work = NULL;
  31.         spin_unlock_irq(&dev->work_lock);

  32.         if (work) {
  33.             __set_current_state(TASK_RUNNING);
  34.             work->fn(work);/*调用dev->work_list上的vhost_work的函数*/
  35.             if (need_resched())
  36.                 schedule();
  37.         } else
  38.             schedule();

  39.     }
  40.     unuse_mm(dev->mm);
  41.     set_fs(oldfs);
  42.     return 0;
  43. }
As we saw in Path 2, vhost_work_queue adds vhost_poll->work to vhost_dev->work_list. Receive and transmit each have their own vhost_poll->work; for the receive side the registered fn is handle_rx_net.

handle_rx_net

  1. static void handle_rx_net(struct vhost_work *work)
  2. {
  3.     struct vhost_net *net = container_of(work, struct vhost_net,
  4.                      poll[VHOST_NET_VQ_RX].work);
  5.     handle_rx(net);
  6. }
which simply calls handle_rx.

handle_rx

  1. static void handle_rx(struct vhost_net *net)
  2. {
  3.     struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
  4.     struct vhost_virtqueue *vq = &nvq->vq;
  5.     unsigned uninitialized_var(in), log;
  6.     struct vhost_log *vq_log;
  7.     struct msghdr msg = {
  8.         .msg_name = NULL,
  9.         .msg_namelen = 0,
  10.         .msg_control = NULL, /* FIXME: get and handle RX aux data. */
  11.         .msg_controllen = 0,
  12.         .msg_iov = vq->iov,
  13.         .msg_flags = MSG_DONTWAIT,
  14.     };
  15.     struct virtio_net_hdr_mrg_rxbuf hdr = {
  16.         .hdr.flags = 0,
  17.         .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
  18.     };
  19.     size_t total_len = 0;
  20.     int err, mergeable;
  21.     s16 headcount;
  22.     size_t vhost_hlen, sock_hlen;
  23.     size_t vhost_len, sock_len;
  24.     /* TODO: check that we are running from vhost_worker? */
  25.     /* 获取vq上关联的sock结构 */
  26.     struct socket *sock = rcu_dereference_check(vq->private_data, 1);

  27.     if (!sock)
  28.         return;

  29.     mutex_lock(&vq->mutex);
  30.     vhost_disable_notify(&net->dev, vq);
  31.     vhost_hlen = nvq->vhost_hlen;
  32.     sock_hlen = nvq->sock_hlen;

  33.     vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
  34.         vq->log : NULL;
  35.     mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

  36.     while ((sock_len = peek_head_len(sock->sk))) {
  37.         sock_len += sock_hlen;
  38.         vhost_len = sock_len + vhost_hlen;
  39.         headcount = get_rx_bufs(vq, vq->heads, vhost_len,
  40.                     &in, vq_log, &log,
  41.                     likely(mergeable) ? UIO_MAXIOV : 1);
  42.         /* On error, stop handling until the next kick. */
  43.         if (unlikely(headcount < 0))
  44.             break;
  45.         /* On overrun, truncate and discard */
  46.         if (unlikely(headcount > UIO_MAXIOV)) {
  47.             msg.msg_iovlen = 1;
  48.             /*由于这里的sock其实就是tap设备的sock结构,所以其接受函数就是tun_recvmsg */
  49.             err = sock->ops->recvmsg(NULL, sock, &msg,
  50.                          1, MSG_DONTWAIT | MSG_TRUNC);
  51.             pr_debug("Discarded rx packet: len %zd\n", sock_len);
  52.             continue;
  53.         }
  54.         /* OK, now we need to know about added descriptors. */
  55.         if (!headcount) {
  56.             if (unlikely(vhost_enable_notify(&net->dev, vq))) {
  57.                 /* They have slipped one in as we were
  58.                  * doing that: check again. */
  59.                 vhost_disable_notify(&net->dev, vq);
  60.                 continue;
  61.             }
  62.             /* Nothing new? Wait for eventfd to tell us
  63.              * they refilled. */
  64.             break;
  65.         }
  66.         /* We don't need to be notified again. */
  67.         if (unlikely((vhost_hlen)))
  68.             /* Skip header. TODO: support TSO. */
  69.             move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
  70.         else
  71.             /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
  72.              * needed because recvmsg can modify msg_iov. */
  73.             copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
  74.         msg.msg_iovlen = in;
  75.         err = sock->ops->recvmsg(NULL, sock, &msg,
  76.                      sock_len, MSG_DONTWAIT | MSG_TRUNC);
  77.         /* Userspace might have consumed the packet meanwhile:
  78.          * it's not supposed to do this usually, but might be hard
  79.          * to prevent. Discard data we got (if any) and keep going. */
  80.         if (unlikely(err != sock_len)) {
  81.             pr_debug("Discarded rx packet: "
  82.                 " len %d, expected %zd\n", err, sock_len);
  83.             vhost_discard_vq_desc(vq, headcount);
  84.             continue;
  85.         }
  86.         if (unlikely(vhost_hlen) &&
  87.          memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
  88.                  vhost_hlen)) {
  89.             vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
  90.              vq->iov->iov_base);
  91.             break;
  92.         }
  93.         /* TODO: Should check and handle checksum. */
  94.         if (likely(mergeable) &&
  95.          memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
  96.                  offsetof(typeof(hdr), num_buffers),
  97.                  sizeof hdr.num_buffers)) {
  98.             vq_err(vq, "Failed num_buffers write");
  99.             vhost_discard_vq_desc(vq, headcount);
  100.             break;
  101.         }
  102.         /* 更新virtio队列,并kick guest */
  103.         vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
  104.                      headcount);
  105.         if (unlikely(vq_log))
  106.             vhost_log_write(vq, vq_log, log, vhost_len);
  107.         total_len += vhost_len;
  108.         /* 如果本次接受达到最大限制,则再次将vhost_poll加入到链表中,等待线程下次调度 */
  109.         if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
  110.             vhost_poll_queue(&vq->poll);
  111.             break;
  112.         }
  113.     }

  114.     mutex_unlock(&vq->mutex);
  115. }
After copying the packets into the virtio queue, handle_rx calls vhost_add_used_and_signal_n to update the used ring and kick the guest.

vhost_add_used_and_signal_n

  1. void vhost_add_used_and_signal_n(struct vhost_dev *dev,
  2.                  struct vhost_virtqueue *vq,
  3.                  struct vring_used_elem *heads, unsigned count)
  4. {
  5.     vhost_add_used_n(vq, heads, count);
  6.     vhost_signal(dev, vq);
  7. }
vhost_add_used_n updates the virtio used ring; we will go into the details later when analyzing vhost-user. vhost_signal notifies the guest, which is actually done through the kernel KVM module; that is also analyzed separately later.

Receiving data inside the guest

This section focuses on how the guest receives packets, i.e. on the virtio-net implementation, whose code lives mainly in virtio_net.c. We again use kernel 3.10.

virtio_net implements the usual PCI device callbacks; virtnet_probe runs when the PCI bus discovers a virtio-net device. This was covered in the "virtio-net initialization" article, so here we start from the data-structure diagram as it looks after initialization.

When the backend has finished processing packets and placed them into the virtio ring shared with the frontend, it "calls" the frontend, which raises an interrupt in the guest. Each vq's registered interrupt handler is vp_interrupt (see "virtio-net initialization"), so that handler runs.

vp_interrupt


  1. static irqreturn_t vp_interrupt(int irq, void *opaque)
  2. {
  3.     struct virtio_pci_device *vp_dev = opaque;
  4.     u8 isr;

  5.     /* reading the ISR has the effect of also clearing it so it's very
  6.      * important to save off the value. */
  7.     /*获取中断服务号*/
  8.     isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);

  9.     /* It's definitely not us if the ISR was not high */
  10.     if (!isr)
  11.         return IRQ_NONE;
  12.     /*如果是配置中断,则调用vp_config_changed处理配置中断*/
  13.     /* Configuration change? Tell driver if it wants to know. */
  14.     if (isr & VIRTIO_PCI_ISR_CONFIG)
  15.         vp_config_changed(irq, opaque);
  16.     /*是数据中断,则调用vp_vring_interrupt处理*/
  17.     return vp_vring_interrupt(irq, opaque);
  18. }

If the interrupt is not a configuration interrupt, vp_vring_interrupt is called.

vp_vring_interrupt


  1. /* Notify all virtqueues on an interrupt. */
  2. static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
  3. {
  4.     struct virtio_pci_device *vp_dev = opaque;
  5.     struct virtio_pci_vq_info *info;
  6.     irqreturn_t ret = IRQ_NONE;
  7.     unsigned long flags;

  8.     spin_lock_irqsave(&vp_dev->lock, flags);
  9.     /*中断处理程序返回IRQ_HANDLED表示接收到了准确的中断信号,并且作了相应正确的处理*/
  10.     list_for_each_entry(info, &vp_dev->virtqueues, node) {
  11.         if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
  12.             ret = IRQ_HANDLED;
  13.     }
  14.     spin_unlock_irqrestore(&vp_dev->lock, flags);

  15.     return ret;
  16. }

This tries the interrupt handler of every virtqueue. A handler returns one of two values: IRQ_NONE means the interrupt was not raised by the device this handler was registered for; IRQ_HANDLED means the interrupt was recognized and handled correctly.
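
For context, here is a hedged sketch of a handler on a shared interrupt line, showing when each return value is used (demo_* names and the device layout are illustrative):

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct demo_device {
        void __iomem *isr;    /* device interrupt status register */
    };

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
        struct demo_device *dev = dev_id;
        u8 status = ioread8(dev->isr);   /* like vp_interrupt reading VIRTIO_PCI_ISR */

        if (!status)
            return IRQ_NONE;      /* not our device: another handler on the shared line may claim it */

        /* ... acknowledge and service the device here ... */
        return IRQ_HANDLED;           /* interrupt recognized and handled */
    }

    /* Registration on a shared line would look like:
     *   request_irq(irq, demo_irq, IRQF_SHARED, "demo", dev);
     */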

vring_interrupt


  1. irqreturn_t vring_interrupt(int irq, void *_vq)
  2. {
  3.     struct vring_virtqueue *vq = to_vvq(_vq);

  4.     if (!more_used(vq)) {
  5.         pr_debug("virtqueue interrupt with no work for %p\n", vq);
  6.         return IRQ_NONE;
  7.     }

  8.     if (unlikely(vq->broken))
  9.         return IRQ_HANDLED;

  10.     pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
  11.     if (vq->vq.callback)
  12.         vq->vq.callback(&vq->vq);

  13.     return IRQ_HANDLED;
  14. }

It mainly invokes vq->vq.callback; for a receive queue the callback was initialized to skb_recv_done.

skb_recv_done


  1. static void skb_recv_done(struct virtqueue *rvq)
  2. {
  3.     struct virtnet_info *vi = rvq->vdev->priv;
  4.     struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

  5.     /* Schedule NAPI, Suppress further interrupts if successful. */
  6.     if (napi_schedule_prep(&rq->napi)) {/*将rq->napi 设置为NAPI_STATE_SCHED 状态*/
  7.         virtqueue_disable_cb(rvq); /*设置vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT */
  8.         __napi_schedule(&rq->napi);
  9.     }
  10. }

__napi_schedule


  1. void __napi_schedule(struct napi_struct *n)
  2. {
  3.     unsigned long flags;

  4.     local_irq_save(flags);
  5.     /*将recv_queue的napi加入每cpu变量softnet_data->poll_list */
  6.     ____napi_schedule(&__get_cpu_var(softnet_data), n);
  7.     local_irq_restore(flags);
  8. }

____napi_schedule


  1. static inline void ____napi_schedule(struct softnet_data *sd,
  2.                  struct napi_struct *napi)
  3. {
  4.     list_add_tail(&napi->poll_list, &sd->poll_list);
  5.     __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  6. }

____napi_schedule adds receive_queue->napi to the per-CPU softnet_data->poll_list and raises the NET_RX_SOFTIRQ softirq.

At this point the receive interrupt handling, i.e. the top half, is finished. Next comes the softirq, i.e. the bottom half.
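
Before following the softirq path, here is a hedged sketch of the standard top-half/bottom-half NAPI split that virtio-net follows (illustrative names, 3.10-era API, locking and error paths omitted):

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    static struct napi_struct demo_napi;

    /* bottom half: called from net_rx_action(); must return how many packets it processed */
    static int demo_poll(struct napi_struct *napi, int budget)
    {
        int done = 0;

        /* ... pull up to 'budget' packets from the device ring and pass them
         *     to the stack with netif_receive_skb() ... */

        if (done < budget) {
            napi_complete(napi);          /* no more work: leave poll_list */
            /* re-enable the device interrupt here (virtqueue_enable_cb in virtio-net) */
        }
        return done;
    }

    /* top half: just suppress further interrupts and schedule the poll */
    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
        if (napi_schedule_prep(&demo_napi)) {
            /* disable the device interrupt here (virtqueue_disable_cb in virtio-net) */
            __napi_schedule(&demo_napi);  /* add napi to softnet_data->poll_list, raise NET_RX_SOFTIRQ */
        }
        return IRQ_HANDLED;
    }

    /* setup, typically in probe():
     *   netif_napi_add(netdev, &demo_napi, demo_poll, 64);
     *   napi_enable(&demo_napi);
     */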

Since the NET_RX_SOFTIRQ handler was registered as net_rx_action, raising NET_RX_SOFTIRQ causes net_rx_action to run:

open_softirq(NET_RX_SOFTIRQ, net_rx_action);

net_rx_action


  1. static void net_rx_action(struct softirq_action *h)
  2. {
  3.     struct softnet_data *sd = &__get_cpu_var(softnet_data);
  4.     unsigned long time_limit = jiffies + 2;
  5.     int budget = netdev_budget;
  6.     void *have;

  7.     local_irq_disable();
  8.     /*遍历当前cpu上的softnet_data->poll_list上的所有napi结构*/
  9.     while (!list_empty(&sd->poll_list)) {
  10.         struct napi_struct *n;
  11.         int work, weight;

  12.         if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
  13.             goto softnet_break;

  14.         local_irq_enable();
  15.         n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
  16.         have = netpoll_poll_lock(n);
  17.         weight = n->weight;

  18.         work = 0;
  19.         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  20.             work = n->poll(n, weight);/*调用napi结构上的poll函数*/
  21.             trace_napi_poll(n);
  22.         }

  23.         WARN_ON_ONCE(work > weight);
  24.         budget -= work;
  25.         local_irq_disable();
  26.         if (unlikely(work == weight)) { /*如果已经poll出了weight个skb*/
  27.             if (unlikely(napi_disable_pending(n))) {/*如果napi被disable了,则直接将当前napi从softnet_data->poll_list上移除*/
  28.                 local_irq_enable();
  29.                 napi_complete(n);
  30.                 local_irq_disable();
  31.             } else {
  32.                 if (n->gro_list) { /*n->gro_list不空说明有skb还没有被重组完成*/
  33.                     /* flush too old packets
  34.                      * If HZ < 1000, flush all packets.
  35.                      */
  36.                     local_irq_enable();
  37.                     napi_gro_flush(n, HZ >= 1000); /*直接将n->gro_list的skb送往协议栈,不再等待重组*/
  38.                     local_irq_disable();
  39.                 }
  40.                 list_move_tail(&n->poll_list, &sd->poll_list); /*将当前napi从softnet_data->poll_list上移除*/
  41.             }
  42.         }

  43.         netpoll_poll_unlock(have);
  44.     }
  45. out:
  46.     net_rps_action_and_irq_enable(sd);
  47.     return;

  48. softnet_break:
  49.     sd->time_squeeze++;
  50.     __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  51.     goto out;
  52. }

Note that each napi->poll call processes at most napi->weight skbs, and for virtio this value is set to napi_weight when the queues are initialized:

netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,napi_weight)

napi_weight defaults to 64 (NAPI_POLL_WEIGHT); it is a module parameter, so it can be changed at module load time.


  1. static int napi_weight = NAPI_POLL_WEIGHT; //64
  2. module_param(napi_weight, int, 0444);


Back to the receive logic: virtio-net initializes the receive_queue's napi->poll callback to virtnet_poll, so virtnet_poll is what runs next.

virtnet_poll



  1. static int virtnet_poll(struct napi_struct *napi, int budget)
  2. {
  3.     struct receive_queue *rq =
  4.         container_of(napi, struct receive_queue, napi);
  5.     struct virtnet_info *vi = rq->vq->vdev->priv;
  6.     void *buf;
  7.     unsigned int r, len, received = 0;

  8. again:
  9.     while (received < budget &&/*virtqueue_get_buf取出要接收的skb*/
  10.      (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
  11.         receive_buf(rq, buf, len); /*真正的接收处理操作,最终调用netif_receive_skb*/
  12.         --rq->num;
  13.         received++;
  14.     }

  15.     if (rq->num < rq->max / 2) {/* 如果接收队列空闲skb不够,重新refill,添加skb */
  16.         if (!try_fill_recv(rq, GFP_ATOMIC))
  17.             schedule_delayed_work(&vi->refill, 0);/*如果暂时分配不出来skb,则延时给delay_work去处理*/
  18.     }

  19.     /*接收skb个数小于budget,有可能是当时空闲的skb不足,则先调用napi_complete将gro_list的数据包接收,然后再次尝试接收,如果接收
  20.     成功,则再次调用napi*/
  21.     /* Out of packets? */
  22.     if (received < budget) {
  23.         r = virtqueue_enable_cb_prepare(rq->vq);
  24.         napi_complete(napi);
  25.         if (unlikely(virtqueue_poll(rq->vq, r)) &&
  26.          napi_schedule_prep(napi)) {
  27.             virtqueue_disable_cb(rq->vq);
  28.             __napi_schedule(napi);
  29.             goto again;
  30.         }
  31.     }

  32.     return received;
  33. }

Three calls in this function matter most:

virtqueue_get_buf(rq->vq, &len): fetches the skb to be received based on last_used_idx and frees the vring descriptors associated with that skb;

receive_buf(rq, buf, len): does the real receive processing and hands the skb to the protocol stack;

try_fill_recv(rq, GFP_ATOMIC): posts fresh, unused buffers back onto the receive queue.

Let's look at them one by one.

virtqueue_get_buf


  1. void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
  2. {
  3.     struct vring_virtqueue *vq = to_vvq(_vq);
  4.     void *ret;
  5.     unsigned int i;
  6.     u16 last_used;

  7.     START_USE(vq);

  8.     /* Only get used array entries after they have been exposed by host. */
  9.     virtio_rmb(vq->weak_barriers);
  10.     /*获取本次要是有的used_elem数组index*/
  11.     last_used = (vq->last_used_idx & (vq->vring.num - 1));
  12.     i = vq->vring.used->ring[last_used].id; /*本次要接受skb对应的data下标,也是skb对应第一个desc的index*/
  13.     *len = vq->vring.used->ring[last_used].len;/*本次要接受skb的长度*/

  14.     if (unlikely(i >= vq->vring.num)) {
  15.         BAD_RING(vq, "id %u out of range\n", i);
  16.         return NULL;
  17.     }
  18.     if (unlikely(!vq->data[i])) {
  19.         BAD_RING(vq, "id %u is not a head!\n", i);
  20.         return NULL;
  21.     }

  22.     /* detach_buf clears data, so grab it now. */
  23.     /*取出要接受的skb*/
  24.     ret = vq->data[i];
  25.     /*释放skb对应的desc*/
  26.     detach_buf(vq, i);
  27.     vq->last_used_idx++;
  28.     /* If we expect an interrupt for the next entry, tell host
  29.      * by writing event index and flush out the write before
  30.      * the read in the next get_buf call. */
  31.     if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
  32.         vring_used_event(&vq->vring) = vq->last_used_idx;
  33.         virtio_mb(vq->weak_barriers);
  34.     }

  35.     END_USE(vq);
  36.     return ret;
  37. }

To make sense of this function it helps to refer to the receive-ring data-structure diagram below.

vring.used->ring[last_used].id ties the skb in the data array to its descriptors in the vring: that id is both the index of the pending skb in the data array and the index of the skb's first descriptor. After fetching the skb, its descriptors must be released, which is what detach_buf does.

detach_buf



  1. static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
  2. {
  3.     unsigned int i;

  4.     /* Clear data ptr. */
  5.     vq->data[head] = NULL;

  6.     /* Put back on free list: find end */
  7.     i = head;

  8.     /* Free the indirect table */
  9.     if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
  10.         kfree(phys_to_virt(vq->vring.desc[i].addr));
  11.     /*因为这个skb马上要被接受处理了,所以释放这个skb对应的所有desc*/
  12.     while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
  13.         i = vq->vring.desc[i].next;
  14.         vq->vq.num_free++;
  15.     }

  16.     vq->vring.desc[i].next = vq->free_head;
  17.     vq->free_head = head;
  18.     /* Plus final descriptor */
  19.     vq->vq.num_free++;
  20. }

Next is receive_buf, which actually delivers the skb to the protocol stack.

receive_buf


  1. static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
  2. {
  3.     struct virtnet_info *vi = rq->vq->vdev->priv;
  4.     struct net_device *dev = vi->dev;
  5.     struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
  6.     struct sk_buff *skb;
  7.     struct skb_vnet_hdr *hdr;
  8. ……
  9.     if (vi->mergeable_rx_bufs)
  10.         skb = receive_mergeable(dev, rq, buf, len);
  11.     else if (vi->big_packets)
  12.         skb = receive_big(dev, rq, buf);
  13.     else
  14.         skb = receive_small(buf, len);

  15.     if (unlikely(!skb))
  16.         return;

  17.     hdr = skb_vnet_hdr(skb);

  18.     u64_stats_update_begin(&stats->rx_syncp);
  19.     stats->rx_bytes += skb->len;
  20.     stats->rx_packets++;
  21.     u64_stats_update_end(&stats->rx_syncp);

  22.     if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
  23.         pr_debug("Needs csum!\n");
  24.         if (!skb_partial_csum_set(skb,
  25.                      hdr->hdr.csum_start,
  26.                      hdr->hdr.csum_offset))
  27.             goto frame_err;
  28.     } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
  29.         skb->ip_summed = CHECKSUM_UNNECESSARY;
  30.     }

  31.     skb->protocol = eth_type_trans(skb, dev);
  32.     pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
  33.          ntohs(skb->protocol), skb->len, skb->pkt_type);
  34.     /*根据后端填入virtio_net_hdr中的信息,设置gso的相关字段,说明收到的是大包*/
  35.     if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
  36.         pr_debug("GSO!\n");
  37.         switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
  38.         case VIRTIO_NET_HDR_GSO_TCPV4:
  39.             skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  40.             break;
  41.         case VIRTIO_NET_HDR_GSO_UDP:
  42.             skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
  43.             break;
  44.         case VIRTIO_NET_HDR_GSO_TCPV6:
  45.             skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  46.             break;
  47.         default:
  48.             net_warn_ratelimited("%s: bad gso type %u.\n",
  49.                      dev->name, hdr->hdr.gso_type);
  50.             goto frame_err;
  51.         }

  52.         if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
  53.             skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

  54.         skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
  55.         if (skb_shinfo(skb)->gso_size == 0) {
  56.             net_warn_ratelimited("%s: zero gso size.\n", dev->name);
  57.             goto frame_err;
  58.         }

  59.         /* Header must be checked, and gso_segs computed. */
  60.         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
  61.         skb_shinfo(skb)->gso_segs = 0;
  62.     }
  63.     /*发往协议栈*/
  64.     netif_receive_skb(skb);
  65.     return;

  66. frame_err:
  67.     dev->stats.rx_frame_errors++;
  68.     dev_kfree_skb(skb);
  69. }


Depending on the features negotiated at initialization time, this function calls receive_small, receive_big or receive_mergeable to build the skb from the buffers, then fills in the GSO-related fields. Note that GSO here is seen from the backend's point of view: it means the backend handed us a merged large packet. Finally netif_receive_skb sends the skb up the stack. In this version (Linux 3.10) virtio-net does not yet support GRO; by Linux 4.2 netif_receive_skb had been replaced with napi_gro_receive, which adds GRO support.

Last is try_fill_recv, which, when the usable skbs in data run low, allocates and posts new buffers and notifies the backend.

try_fill_recv



  1. static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
  2. {
  3.     struct virtnet_info *vi = rq->vq->vdev->priv;
  4.     int err;
  5.     bool oom;

  6.     do {
  7.         if (vi->mergeable_rx_bufs)
  8.             err = add_recvbuf_mergeable(rq, gfp); /*后端支持VIRTIO_NET_F_MRG_RXBUF*/
  9.         else if (vi->big_packets)
  10.             err = add_recvbuf_big(rq, gfp); /*后端支持GUEST_GSO/GUEST_TSO,相当于LRO*/
  11.         else
  12.             err = add_recvbuf_small(rq, gfp);

  13.         oom = err == -ENOMEM;
  14.         if (err)
  15.             break;
  16.         ++rq->num;
  17.     } while (rq->vq->num_free);
  18.     if (unlikely(rq->num > rq->max))
  19.         rq->max = rq->num;
  20.     virtqueue_kick(rq->vq); /*通知后端avail ring更新*/
  21.     return !oom;
  22. }


add_recvbuf_small and add_recvbuf_big both allocate skbs and convert them into descriptors; virtqueue_kick then notifies the backend that the avail ring has been refilled so it can keep delivering packets.

The overall virtio-net receive flow is shown below.

Yellow marks the interrupt (top-half) processing and green the softirq (bottom-half) processing. As you can see, the network interrupt only puts the napi on the list; the real receive work is all done in the softirq.

Transmitting data inside the guest

Guest transmission starts from the ndo_start_xmit callback that the virtio_net driver registers on its net_device; the "virtio-net initialization" article provides the background. virtio-net registers netdev_ops as virtnet_netdev, so the ndo_start_xmit callback is start_xmit:



  1. static const struct net_device_ops virtnet_netdev = {
  2.     .ndo_open = virtnet_open,
  3.     .ndo_stop      = virtnet_close,
  4.     .ndo_start_xmit = start_xmit,
  5. ……
  6. }


First, the overall transmit flow:

start_xmit



  1. static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  2. {
  3.     struct virtnet_info *vi = netdev_priv(dev);
  4.     /*从skb->queue_mapping获取队列编号*/
  5.     int qnum = skb_get_queue_mapping(skb);
  6.     struct send_queue *sq = &vi->sq[qnum];
  7.     int err;

  8.     /* Free up any pending old buffers before queueing new ones. */
  9.     /* 在发送数据包前首先释放掉发送队列中之前残留的数据包 */
  10.     free_old_xmit_skbs(sq);

  11.     /* Try to transmit */
  12.     /*发送数据包*/
  13.     err = xmit_skb(sq, skb);

  14.     /* This should not */
  15.     if (unlikely(err)) {
  16.         dev->stats.tx_fifo_errors++;
  17.         if (net_ratelimit())
  18.             dev_warn(&dev->dev,
  19.                 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
  20.         dev->stats.tx_dropped++;
  21.         kfree_skb(skb);
  22.         return NETDEV_TX_OK;
  23.     }
  24.     /*通知后端接受数据包*/
  25.     virtqueue_kick(sq->vq);

  26.     /* Don't wait up for transmitted skbs to be freed. */
  27.     skb_orphan(skb);
  28.     nf_reset(skb);

  29.     /* Apparently nice girls don't return TX_BUSY; stop the queue
  30.      * before it gets out of hand. Naturally, this wastes entries. */
  31.     if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
  32.         netif_stop_subqueue(dev, qnum);
  33.         if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
  34.             /* More just got used, free them then recheck. */
  35.             free_old_xmit_skbs(sq);
  36.             if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
  37.                 netif_start_subqueue(dev, qnum);
  38.                 virtqueue_disable_cb(sq->vq);
  39.             }
  40.         }
  41.     }

  42.     return NETDEV_TX_OK;
  43. }


The transmit work itself is done by xmit_skb, after which virtqueue_kick(sq->vq) notifies the backend to pick up the packet. Let's look at virtqueue_kick first.

virtqueue_kick



  1. void virtqueue_kick(struct virtqueue *vq)
  2. {
  3.     if (virtqueue_kick_prepare(vq))
  4.         virtqueue_notify(vq);
  5. }


It first calls virtqueue_kick_prepare to decide whether a kick is actually needed.

virtqueue_kick_prepare



  1. bool virtqueue_kick_prepare(struct virtqueue *_vq)
  2. {
  3.     struct vring_virtqueue *vq = to_vvq(_vq);
  4.     u16 new, old;
  5.     bool needs_kick;

  6.     START_USE(vq);
  7.     /* We need to expose available array entries before checking avail
  8.      * event. */
  9.     virtio_mb(vq->weak_barriers);
  10.     /*old是add_sg之前的avail.idx*/
  11.     old = vq->vring.avail->idx - vq->num_added;
  12.     /*new是当前的avail.idx*/
  13.     new = vq->vring.avail->idx;
  14.     vq->num_added = 0;

  15. #ifdef DEBUG
  16.     if (vq->last_add_time_valid) {
  17.         WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
  18.                      vq->last_add_time)) > 100);
  19.     }
  20.     vq->last_add_time_valid = false;
  21. #endif

  22.     if (vq->event) {
  23.         needs_kick = vring_need_event(vring_avail_event(&vq->vring),
  24.                      new, old);
  25.     } else {
  26.         needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
  27.     }
  28.     END_USE(vq);
  29.     return needs_kick;
  30. }


Several values are involved here: old is avail.idx before the buffers were added, new is the current avail.idx, and there is also vring_avail_event(&vq->vring), defined as:

#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])

This is the extra slot right after the used ring's entries. Before popping an elem from the virtqueue, the backend writes into it the next avail index it is going to consume, i.e. its last_avail_idx.

Now look at vring_need_event:

vring_need_event



  1. static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
  2. {
  3.     return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
  4. }


The frontend decides whether to notify the backend by testing (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old), i.e. whether the index the backend asked to be woken at falls inside the range of entries just published.
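
A small self-contained worked example (userspace, with illustrative index values) makes the test easier to read:

    #include <stdio.h>
    #include <stdint.h>

    /* same comparison as the kernel macro, in plain C */
    static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
        /* the driver just pushed avail.idx from old = 10 to new = 13 */
        printf("%d\n", vring_need_event(11, 13, 10)); /* 1: the device is waiting at index 11, inside [10,13) -> kick */
        printf("%d\n", vring_need_event(9,  13, 10)); /* 0: index 9 was crossed in an earlier batch, no new kick needed */
        printf("%d\n", vring_need_event(13, 13, 10)); /* 0: the device only wants a kick once entry 13 is published */
        return 0;
    }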

Now back to the actual transmit work, xmit_skb.

xmit_skb



  1. static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
  2. {
  3.     struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); /*struct skb_vnet_hdr存放在skb的cb中*/
  4.     const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
  5.     struct virtnet_info *vi = sq->vq->vdev->priv;
  6.     unsigned num_sg;

  7.     pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

  8.     if (skb->ip_summed == CHECKSUM_PARTIAL) { /*校验和由硬件计算,这里也就是交给后端vhost-net或vhost-user*/
  9.         hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
  10.         hdr->hdr.csum_start = skb_checksum_start_offset(skb); /*记录硬件计算校验和的相关信息,校验起始位置和偏移*/
  11.         hdr->hdr.csum_offset = skb->csum_offset;
  12.     } else {
  13.         hdr->hdr.flags = 0;
  14.         hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
  15.     }

  16.     if (skb_is_gso(skb)) { /*注意这里判断的不是GSO,而是TSO和UFO,GSO的逻辑在进入驱动前就过了*/
  17.         hdr->hdr.hdr_len = skb_headlen(skb); /*设置TSO/UFO相关特性,方便带给后端,让后端识别*/
  18.         hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
  19.         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  20.             hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; /*TSO*/
  21.         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  22.             hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; /*TSO*/
  23.         else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
  24.             hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; /*UFO*/
  25.         else
  26.             BUG();
  27.         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
  28.             hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
  29.     } else { /*不支持硬件GSO(TSO/UFO)的情况*/
  30.         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
  31.         hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
  32.     }

  33.     hdr->mhdr.num_buffers = 0;

  34.     /* Encode metadata header at front. */
  35.     /*将cb中的skb_vnet_hdr拷贝到数据之前*/
  36.     if (vi->mergeable_rx_bufs)
  37.         sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
  38.     else
  39.         sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
  40.     /*将skb数据拷贝到发送队列中*/
  41.     num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
  42.     return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
  43. }


First a word about the data-structure conversions on the transmit path. All the data (both the payload in the skb and the virtio header stored in skb->cb) is first turned into struct scatterlist entries (sg) on the send_queue, and those sg entries are then turned into struct vring_desc entries (desc) on the vring, after which the backend is told to fetch the packet.

Here sg_set_buf converts the virtio hdr in skb->cb into an sg entry, skb_to_sgvec converts the skb's data into sg entries, and virtqueue_add_outbuf finally converts those sg entries into descriptors. Let's take them one at a time.

sg_set_buf



  1. /**
  2.  * sg_set_buf - Set sg entry to point at given data
  3.  * @sg:         SG entry
  4.  * @buf:     Data
  5.  * @buflen:     Data length
  6.  *
  7.  **/
  8. static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  9.              unsigned int buflen)
  10. {
  11. #ifdef CONFIG_DEBUG_SG
  12.     BUG_ON(!virt_addr_valid(buf));
  13. #endif
  14.     /*virt_to_page根据数据的起始地址找到页对齐的地址,offset_in_page获取数据起始地址在这一页的偏移*/
  15.     sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
  16. }


The buf and buflen arguments here are the skb_vnet_hdr in the cb and its length. sg_set_page then ties the sg entry to the page backing the skb_vnet_hdr (all data ultimately lives on some page).

sg_set_page



  1. static inline void sg_set_page(struct scatterlist *sg, struct page *page,
  2.              unsigned int len, unsigned int offset)
  3. {
  4.     sg_assign_page(sg, page);
  5.     sg->offset = offset;
  6.     sg->length = len;
  7. }


sg_assign_page stores the page's address into sg->page_link, but the assignment is a little special.

sg_assign_page



  1. static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
  2. {
  3.     unsigned long page_link = sg->page_link & 0x3;

  4.     /*
  5.      * In order for the low bit stealing approach to work, pages
  6.      * must be aligned at a 32-bit boundary as a minimum.
  7.      */
  8.     BUG_ON((unsigned long) page & 0x03); /*后两位有特殊作用,用来标识结尾*/
  9. #ifdef CONFIG_DEBUG_SG
  10.     BUG_ON(sg->sg_magic != SG_MAGIC);
  11.     BUG_ON(sg_is_chain(sg));
  12. #endif
  13.     sg->page_link = page_link | (unsigned long) page;
  14. }


A page's address is always at least 4-byte aligned, so its low two bits are guaranteed to be zero. When the address is stored in sg->page_link those two bits are therefore free to carry extra information, and scatterlist uses them to mark chaining and the end of the sg list.
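
A tiny self-contained userspace snippet illustrates the same low-bit tagging idea (the TAG_* names are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_CHAIN 0x01UL   /* like scatterlist's "this entry chains to another list" */
    #define TAG_END   0x02UL   /* like scatterlist's "last entry" */

    int main(void)
    {
        static int page __attribute__((aligned(4)));  /* stands in for a struct page */
        uintptr_t link = (uintptr_t)&page;            /* low two bits are 0 */

        link |= TAG_END;                              /* what sg_mark_end() does: set end, clear chain */
        link &= ~TAG_CHAIN;

        printf("pointer: %p, is_end: %d\n",
               (void *)(link & ~0x3UL),               /* mask the tag bits off to recover the pointer */
               (link & TAG_END) != 0);
        return 0;
    }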

That converts the skb_vnet_hdr in the cb into its sg entry. Next, how the skb's data is converted into sg entries:

num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;

Note the sq->sg + 1 argument: sq->sg[0] has already been used for the header information from the cb. The return value is the number of sg entries produced from skb->data plus 1 (the header sg), i.e. the total number of sg entries this skb occupies.

skb_to_sgvec



  1. int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
  2. {
  3.     int nsg = __skb_to_sgvec(skb, sg, offset, len);

  4.     sg_mark_end(&sg[nsg - 1]);

  5.     return nsg;
  6. }


The conversion itself is done by __skb_to_sgvec; sg_mark_end just marks the last sg entry. Let's see how it does that first.

sg_mark_end



  1. static inline void sg_mark_end(struct scatterlist *sg)
  2. {
  3. #ifdef CONFIG_DEBUG_SG
  4.     BUG_ON(sg->sg_magic != SG_MAGIC);
  5. #endif
  6.     /*
  7.      * Set termination bit, clear potential chain bit
  8.      */
  9.     sg->page_link |= 0x02;
  10.     sg->page_link &= ~0x01;/*低字节有特殊作用,0x02表示结尾*/
  11. }


As described above, scatterlist uses the low two bits of sg->page_link (an address) to mark the last sg entry. Now __skb_to_sgvec.

__skb_to_sgvec

Note its arguments: offset and len are 0 and skb->len respectively.



  1. static int
  2. __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
  3. {
  4.     int start = skb_headlen(skb);/*线性区长度*/
  5.     int i, copy = start - offset;
  6.     struct sk_buff *frag_iter;
  7.     int elt = 0;

  8.     if (copy > 0) {
  9.         if (copy > len) /*数据包全在线性区*/
  10.             copy = len;
  11.         sg_set_buf(sg, skb->data + offset, copy);/*线性区占用一个scatterlist entry*/
  12.         elt++;
  13.         if ((len -= copy) == 0)
  14.             return elt;
  15.         offset += copy;
  16.     }
  17.     /*对非线性区的处理,逐个frag的处理*/
  18.     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  19.         int end;

  20.         WARN_ON(start > offset + len);

  21.         end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
  22.         if ((copy = end - offset) > 0) {
  23.             skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

  24.             if (copy > len) /*copy为一个frag的长度*/
  25.                 copy = len;
  26.             sg_set_page(&sg[elt], skb_frag_page(frag), copy, /*每一个frag对应一个scatterlist entry*/
  27.                     frag->page_offset+offset-start);
  28.             elt++;
  29.             if (!(len -= copy))
  30.                 return elt;
  31.             offset += copy; //offset为已经拷贝的长度
  32.         }
  33.         start = end;
  34.     }

  35.     /*处理skb_shinfo(skb)->frag_list,*/
  36.     skb_walk_frags(skb, frag_iter) {
  37.         int end;

  38.         WARN_ON(start > offset + len);

  39.         end = start + frag_iter->len;
  40.         if ((copy = end - offset) > 0) {
  41.             if (copy > len)
  42.                 copy = len;
  43.             /*对链表上的每个skb递归调用__skb_to_sgvec,转换为对应的scatterlist entry*/
  44.             elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
  45.                      copy);
  46.             if ((len -= copy) == 0)
  47.                 return elt;
  48.             offset += copy;
  49.         }
  50.         start = end;
  51.     }
  52.     BUG_ON(len);
  53.     return elt;
  54. }


The conversion steps are explained by the comments in the code, so we won't repeat them here. Finally, the sg-to-desc conversion, i.e. the implementation of virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC).

virtqueue_add_outbuf



  1. int virtqueue_add_outbuf(struct virtqueue *vq,
  2.              struct scatterlist sg[], unsigned int num,
  3.              void *data,
  4.              gfp_t gfp)
  5. {
  6.     return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
  7. }


It mainly calls virtqueue_add.

virtqueue_add



  1. static inline int virtqueue_add(struct virtqueue *_vq, /*添加的目的队列*/
  2.         struct scatterlist *sgs[], /*要添加的scatterlist*/
  3.         struct scatterlist *(*next) /*一个函数指针,用于获取下一个scatterlist entry*/
  4.         (struct scatterlist *, unsigned int *),
  5.         unsigned int total_out, /*输入的scatterlist entry个数,即skb转换为的scatterlist entry的个数*/
  6.         unsigned int total_in, /*输出的scatterlist entry个数,对于发送total_in为0*/
  7.         unsigned int out_sgs, /*输出的scatterlist list的个数,这里一个out_sgs代表一个完整的skb_buffer,对于发送out_sgs为1*/
  8.         unsigned int in_sgs, /*输入的scatterlist list的个数,这里一个in_sgs代表一个完整的skb_buffer,对于发送in_sgs为0*/
  9.         void *data, /* data为skb* */
  10.         gfp_t gfp)
  11. {
  12.     struct vring_virtqueue *vq = to_vvq(_vq);
  13.     struct scatterlist *sg;
  14.     unsigned int i, n, avail, uninitialized_var(prev), total_sg;
  15.     int head;

  16.     START_USE(vq);

  17.     BUG_ON(data == NULL);

  18.     total_sg = total_in + total_out;

  19.     /* If the host supports indirect descriptor tables, and we have multiple
  20.      * buffers, then go indirect. FIXME: tune this threshold */
  21.     if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
  22.         head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
  23.                      total_in,
  24.                      out_sgs, in_sgs, gfp);
  25.         if (likely(head >= 0))
  26.             goto add_head;
  27.     }

  28.     BUG_ON(total_sg > vq->vring.num);
  29.     BUG_ON(total_sg == 0);

  30.     if (vq->vq.num_free < total_sg) {
  31.         pr_debug("Can't add buf len %i - avail = %i\n",
  32.              total_sg, vq->vq.num_free);
  33.         /* FIXME: for historical reasons, we force a notify here if
  34.          * there are outgoing parts to the buffer. Presumably the
  35.          * host should service the ring ASAP. */
  36.         if (out_sgs)
  37.             vq->notify(&vq->vq);
  38.         END_USE(vq);
  39.         return -ENOSPC;
  40.     }

  41.     /* We're about to use some buffers from the free list. */
  42.     vq->vq.num_free -= total_sg; /*total_sg为skb对应的scatterlist entry总数*/
  43.     /*一个desc对应一个scatterlist entry*/
  44.     head = i = vq->free_head;
  45.     for (n = 0; n < out_sgs; n++) {
  46.         for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
  47.             vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
  48.             vq->vring.desc[i].addr = sg_phys(sg);
  49.             vq->vring.desc[i].len = sg->length;
  50.             prev = i;
  51.             i = vq->vring.desc[i].next;
  52.         }
  53.     }
  54.     for (; n < (out_sgs + in_sgs); n++) { /*对于发送方向,没有in_sgs*/
  55.         for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
  56.             vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
  57.             vq->vring.desc[i].addr = sg_phys(sg);
  58.             vq->vring.desc[i].len = sg->length;
  59.             prev = i;
  60.             i = vq->vring.desc[i].next;
  61.         }
  62.     }
  63.     /* Last one doesn't continue. */
  64.     vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

  65.     /* Update free pointer */
  66.     vq->free_head = i;

  67. add_head:
  68.     /* Set token.记录数组记录本次发送的skb */
  69.     vq->data[head] = data; /*head 为记录次skb的首个desc的下标,data为本skb的地址*/

  70.     /* Put entry in available array (but don't update avail->idx until they
  71.      * do sync). */
  72.      /*更新avail*/
  73.     avail = (vq->vring.avail->idx & (vq->vring.num-1));
  74.     vq->vring.avail->ring[avail] = head; /*将本次要发送的首个desc下标记录在avail->ring[vq->vring.avail->idx]*/

  75.     /* Descriptors and available array need to be set before we expose the
  76.      * new available array entries. */
  77.     virtio_wmb(vq->weak_barriers);
  78.     vq->vring.avail->idx++;
  79.     vq->num_added++;

  80.     /* This is very unlikely, but theoretically possible. Kick
  81.      * just in case. */
  82.      /*如果avail的数量太多,则kick后端收包,这种情况是你很难发生的*/
  83.     if (unlikely(vq->num_added == (1 << 16) - 1))
  84.         virtqueue_kick(_vq);

  85.     pr_debug("Added buffer head %i to %p\n", head, vq);
  86.     END_USE(vq);

  87.     return 0;
  88. }


The data-structure relationships involved in the whole skb -> sg -> desc conversion are shown below.



Guest → Host

Now let's see how the vhost backend picks up the packets the guest sends. First, some groundwork.

vhost_net_open initializes the vhost_poll structures of both vhost_virtqueue and vhost_net, corresponding to the green and orange parts of the figure below.

 

We start from qemu issuing the VHOST_SET_VRING_KICK ioctl. On the vhost-net side this command is handled by vhost_vring_ioctl.
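
From userspace, this step looks roughly like the hedged sketch below (setup_kick and vq_index are illustrative; the ioctl and struct are the real uapi). In a real setup qemu also registers the same eventfd with KVM as an ioeventfd, so a guest write to the queue-notify register signals it directly:

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* create the kick eventfd and hand it to vhost for one virtqueue */
    int setup_kick(int vhost_fd, unsigned int vq_index)
    {
        int kick_fd = eventfd(0, EFD_NONBLOCK);
        struct vhost_vring_file kick = {
            .index = vq_index,
            .fd    = kick_fd,
        };

        /* handled by vhost_vring_ioctl(): sets vq->kick and calls vhost_poll_start() */
        ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
        return kick_fd;
    }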


  1. long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
  2. {
  3.     struct file *eventfp, *filep = NULL;
  4.     bool pollstart = false, pollstop = false;
  5.     struct eventfd_ctx *ctx = NULL;
  6.     ......

  7.     switch (ioctl) {
  8.      ......
  9.     case VHOST_SET_VRING_KICK:
  10.         if (copy_from_user(&f, argp, sizeof f)) {
  11.             r = -EFAULT;
  12.             break;
  13.         }
  14.         eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
  15.         if (IS_ERR(eventfp)) {
  16.             r = PTR_ERR(eventfp);
  17.             break;
  18.         }
  19.         if (eventfp != vq->kick) {
  20.             pollstop = (filep = vq->kick) != NULL;
  21.             pollstart = (vq->kick = eventfp) != NULL; /*设置vq->kick fd,且设置pollstart为true*/
  22.         } else
  23.             filep = eventfp;
  24.         break;
  25.     case VHOST_SET_VRING_CALL:
  26.         ......
  27.         break;
  28.     default:
  29.         r = -ENOIOCTLCMD;
  30.     }

  31.     if (pollstop && vq->handle_kick)
  32.         vhost_poll_stop(&vq->poll);

  33.     if (ctx)
  34.         eventfd_ctx_put(ctx);
  35.     if (filep)
  36.         fput(filep);

  37.     if (pollstart && vq->handle_kick) /*如果设置了kickfd*/
  38.         r = vhost_poll_start(&vq->poll, vq->kick);

  39.     mutex_unlock(&vq->mutex);

  40.     if (pollstop && vq->handle_kick)
  41.         vhost_poll_flush(&vq->poll);
  42.     return r;
  43. }

So the handling of VHOST_SET_VRING_KICK not only sets vq->kick but also ends up calling vhost_poll_start.

vhost_poll_start

Note: the arguments passed to vhost_poll_start here are vq->poll and vq->kick, i.e. the struct file of the eventfd.


  1. int vhost_poll_start(struct vhost_poll *poll, struct file *file)
  2. {
  3.     unsigned long mask;
  4.     int ret = 0;

  5.     if (poll->wqh)
  6.         return 0;
  7. /*这里的poll函数也就是vq->kick对应的poll*/
  8.     mask = file->f_op->poll(file, &poll->table);
  9.     if (mask)
  10.         vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  11.     if (mask & POLLERR) {
  12.         if (poll->wqh)
  13.             remove_wait_queue(poll->wqh, &poll->wait);
  14.         ret = -EINVAL;
  15.     }

  16.     return ret;
  17. }

So the file->f_op->poll call here is the poll function of the eventfd's file. Since the eventfd's file_operations is eventfd_fops:


  1. static const struct file_operations eventfd_fops = {
  2. ……
  3.     .poll        = eventfd_poll,
  4. ……
  5. };

the poll function here is eventfd_poll.

eventfd_poll

The file argument here is the file structure of the eventfd.


static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
    struct eventfd_ctx *ctx = file->private_data;
    unsigned int events = 0;
    unsigned long flags;
    /* wait here is vhost_virtqueue.poll.table */
    poll_wait(file, &ctx->wqh, wait);

    spin_lock_irqsave(&ctx->wqh.lock, flags);
    if (ctx->count > 0)
        events |= POLLIN;
    if (ctx->count == ULLONG_MAX)
        events |= POLLERR;
    if (ULLONG_MAX - 1 > ctx->count)
        events |= POLLOUT;
    spin_unlock_irqrestore(&ctx->wqh.lock, flags);

    return events;
}

poll_wait


static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
    /* vhost_virtqueue.poll.table._qproc was initialized to vhost_poll_func,
     * so this actually calls vhost_poll_func
     */
    if (p && p->_qproc && wait_address)
        p->_qproc(filp, wait_address, p);
}

vhost_poll_func


static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
             poll_table *pt)
{
    struct vhost_poll *poll;

    poll = container_of(pt, struct vhost_poll, table);
    poll->wqh = wqh;
    add_wait_queue(wqh, &poll->wait);
}

  This is how vhost_virtqueue.poll.wait gets added to the wait queue inside the eventfd_ctx stored in the eventfd file's private_data field.

  That is the groundwork for vhost packet transmission. Now let's see what happens after the guest sends a packet. Once the guest finishes sending, the kernel KVM module eventually writes to this eventfd (the exact path is analyzed separately later and not expanded here). Let's look at the implementation of the eventfd's write function, eventfd_write.

eventfd_write


static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
             loff_t *ppos)
{
    struct eventfd_ctx *ctx = file->private_data;
    ssize_t res;
    ......
    if (likely(res > 0)) {
        ctx->count += ucnt;
        if (waitqueue_active(&ctx->wqh))    /* if ctx's wait queue is not empty */
            wake_up_locked_poll(&ctx->wqh, POLLIN); /* invoke the func callback of every entry on the wait queue */
    }
    spin_unlock_irq(&ctx->wqh.lock);

    return res;
}

kick eventfd进行write会导致调用其eventfd_ctx等待队列的每个节点注册的func函数。之前分析过vhost_poll_start会最终将vhost_virtqueue.poll.wait添加到eventfd file的私有字段eventfd_ctx中的等待队列中。而vhost_virtqueue.poll.waitfunc被初始化为vhost_poll_wakeup。所以这里就会调用vhost_poll_wakeup

vhost_poll_wakeup


static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
             void *key)
{
    struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

    if (!((unsigned long)key & poll->mask))
        return 0;

    vhost_poll_queue(poll);
    return 0;
}

vhost_poll_wakeup in turn calls vhost_poll_queue:

vhost_poll_queue


void vhost_poll_queue(struct vhost_poll *poll)
{
    vhost_work_queue(poll->dev, &poll->work);
}

which in turn calls vhost_work_queue:

vhost_work_queue


void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->work_lock, flags);
    if (list_empty(&work->node)) {
        /* add vhost_poll->work to vhost_dev->work_list */
        list_add_tail(&work->node, &dev->work_list);
        work->queue_seq++;
        /* wake up the vhost worker thread */
        wake_up_process(dev->worker);
    }
    spin_unlock_irqrestore(&dev->work_lock, flags);
}

    After adding vhost_poll->work to vhost_dev->work_list, vhost_work_queue wakes up the vhost worker thread, whose handler function is vhost_worker.
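As a reminder of where dev->worker comes from: it is the kernel thread created when qemu issues VHOST_SET_OWNER. In this kernel version, vhost_dev_set_owner creates it roughly as follows (abridged):

/* vhost_dev_set_owner() (abridged): this is where dev->worker comes from */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
    struct task_struct *worker;
    ......
    worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
    if (IS_ERR(worker))
        return PTR_ERR(worker);

    dev->worker = worker;       /* the thread that vhost_work_queue() wakes up */
    wake_up_process(worker);    /* start running vhost_worker() */
    ......
    return 0;
}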

vhost_worker


static int vhost_worker(void *data)
{
    struct vhost_dev *dev = data;
    ......
    for (;;) {
        ......
        /* check whether dev->work_list is empty */
        if (!list_empty(&dev->work_list)) {
            work = list_first_entry(&dev->work_list,
                        struct vhost_work, node);
            list_del_init(&work->node);
            seq = work->queue_seq;
        } else
            work = NULL;
        spin_unlock_irq(&dev->work_lock);

        if (work) {
            __set_current_state(TASK_RUNNING);
            work->fn(work); /* run the handler registered on this vhost_work */
            if (need_resched())
                schedule();
        } else
            schedule();

    }
    ......
    return 0;
}

vhost_worker is an endless loop that keeps checking whether dev->work_list is empty; if it is not, it takes the vhost_work structure off the list and calls its registered fn. As analyzed above, vhost_work_queue adds vhost_poll->work to vhost_dev->work_list, and for the TX queue the fn registered on vhost_poll->work is handle_tx_kick.

handle_tx_kick


static void handle_tx_kick(struct vhost_work *work)
{
    struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                         poll.work);
    struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

    handle_tx(net);
}

which mainly calls handle_tx:

handle_tx


static void handle_tx(struct vhost_net *net)
{
    struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
    struct vhost_virtqueue *vq = &nvq->vq;
    unsigned out, in, s;
    int head;
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_control = NULL,
        .msg_controllen = 0,
        .msg_iov = vq->iov,
        .msg_flags = MSG_DONTWAIT,
    };
    size_t len, total_len = 0;
    int err;
    size_t hdr_size;
    struct socket *sock;
    struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
    bool zcopy, zcopy_used;

    /* TODO: check that we are running from vhost_worker? */
    /* the sock structure associated with the tap device */
    sock = rcu_dereference_check(vq->private_data, 1);
    if (!sock)
        return;

    mutex_lock(&vq->mutex);
    /* disable guest->host notifications on this virtqueue via the VRING_USED_F_NO_NOTIFY flag */
    vhost_disable_notify(&net->dev, vq);

    hdr_size = nvq->vhost_hlen;
    zcopy = nvq->ubufs;

    for (;;) {
        /* Release DMAs done buffers first */
        if (zcopy)
            vhost_zerocopy_signal_used(net, vq);
        /* translate the descriptors referenced by the avail ring into iovecs */
        head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                     ARRAY_SIZE(vq->iov),
                    &out, &in,
                     NULL, NULL);
        /* On error, stop handling until the next kick. */
        if (unlikely(head < 0))
            break;
        /* Nothing new? Wait for eventfd to tell us they refilled. */
        if (head == vq->num) {
            int num_pends;

            /* If more outstanding DMAs, queue the work.
             * Handle upend_idx wrap around
             */
            num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
                 (nvq->upend_idx - nvq->done_idx) :
                 (nvq->upend_idx + UIO_MAXIOV -
                 nvq->done_idx);
            if (unlikely(num_pends > VHOST_MAX_PEND))
                break;
            if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                vhost_disable_notify(&net->dev, vq);
                continue;
            }
            break;
        }
        if (in) {
            vq_err(vq, "Unexpected descriptor format for TX: "
            "out %d, int %d\n", out, in);
            break;
        }
        /* Skip header. TODO: support TSO. */
        s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
        msg.msg_iovlen = out;
        len = iov_length(vq->iov, out);
        /* Sanity check */
        if (!len) {
            vq_err(vq, "Unexpected header len for TX: "
            "%zd expected %zd\n",
             iov_length(nvq->hdr, s), hdr_size);
            break;
        }
        zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
                 nvq->upend_idx != nvq->done_idx);

        /* use msg_control to pass vhost zerocopy ubuf info to skb */
        if (zcopy_used) {
            vq->heads[nvq->upend_idx].id = head;
            if (!vhost_net_tx_select_zcopy(net) ||
             len < VHOST_GOODCOPY_LEN) {
                /* copy don't need to wait for DMA done */
                vq->heads[nvq->upend_idx].len =
                            VHOST_DMA_DONE_LEN;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                ubufs = NULL;
            } else {
                struct ubuf_info *ubuf;
                ubuf = nvq->ubuf_info + nvq->upend_idx;

                vq->heads[nvq->upend_idx].len =
                    VHOST_DMA_IN_PROGRESS;
                ubuf->callback = vhost_zerocopy_callback;
                ubuf->ctx = nvq->ubufs;
                ubuf->desc = nvq->upend_idx;
                msg.msg_control = ubuf;
                msg.msg_controllen = sizeof(ubuf);
                ubufs = nvq->ubufs;
                kref_get(&ubufs->kref);
            }
            nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
        } else
            msg.msg_control = NULL;
        /* TODO: Check specific error and bomb out unless ENOBUFS? */
        /* call the send function of the tap device's sock, i.e. tun_sendmsg */
        err = sock->ops->sendmsg(NULL, sock, &msg, len);
        if (unlikely(err < 0)) {
            if (zcopy_used) {
                if (ubufs)
                    vhost_net_ubuf_put(ubufs);
                nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                    % UIO_MAXIOV;
            }
            vhost_discard_vq_desc(vq, 1);
            break;
        }
        if (err != len)
            pr_debug("Truncated TX packet: "
                " len %d != %zd\n", err, len);
        /* update the used ring */
        if (!zcopy_used)
            vhost_add_used_and_signal(&net->dev, vq, head, 0);
        else
            vhost_zerocopy_signal_used(net, vq);
        total_len += len;
        vhost_net_tx_packet(net);
        /* quota for this run exceeded: requeue the work and yield */
        if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
            vhost_poll_queue(&vq->poll);
            break;
        }
    }

    mutex_unlock(&vq->mutex);
}

  handle_tx calls the send function attached to the tap device's sock structure, i.e. tun_sendmsg:

tun_sendmsg


static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct msghdr *m, size_t total_len)
{
    int ret;
    struct tun_file *tfile = container_of(sock, struct tun_file, socket);
    struct tun_struct *tun = __tun_get(tfile);

    if (!tun)
        return -EBADFD;
    ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
             m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
    tun_put(tun);
    return ret;
}

tun_get_user

Inside tun_get_user, tun_alloc_skb is called to allocate an skb, the packet data is copied out of the virtio ring buffers into the skb, and finally netif_rx_ni is called to hand the packet to the host network stack.
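tun_get_user itself is fairly long; a heavily abridged sketch of the steps just described (zero-copy, GSO and error handling omitted, so treat this as an outline rather than the exact source) looks roughly like this:

/* tun_get_user (heavily abridged): vhost's iovecs --> skb --> host stack */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                void *msg_control, const struct iovec *iv,
                size_t total_len, size_t count, int noblock)
{
    struct virtio_net_hdr gso = { 0 };
    struct sk_buff *skb;
    size_t len = total_len;
    int offset = 0;
    ......
    /* read the virtio_net_hdr that sits at the front of the iovec */
    memcpy_fromiovecend((void *)&gso, iv, 0, sizeof(gso));
    offset = tun->vnet_hdr_sz;
    len -= offset;

    /* allocate the skb and copy the payload out of the vring buffers */
    skb = tun_alloc_skb(tfile, NET_IP_ALIGN, len, gso.hdr_len, noblock);
    skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);

    /* fill in skb->protocol and checksum/GSO metadata from gso, etc. */
    ......
    netif_rx_ni(skb);           /* inject the packet into the host network stack */
    ......
    return total_len;
}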

The data structures and the call path involved in this vhost transmit flow are shown by the red lines in the figure below. Part of the transmit process runs in process context (the vhost kernel thread), and part of it runs in softirq context.

