Its enqueue operation simply dropped and freed the packet, so the dequeue operation got nothing back. Clearly that queueing discipline is only a temporary placeholder, so when is the real one registered? We know that to enable a NIC and bring it into the running state, it is not enough to assign it an IP address with ifconfig; we also have to bring it up with ifconfig eth0 up. ifconfig does this with an ioctl on a socket, which enters the following path:
ioctl()->sock_ioctl()->dev_ioctl()->SIOCSIFFLAGS->dev_ifsioc()->dev_change_flags()->IFF_UP->dev_open()
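To make the trigger concrete, here is a minimal userspace sketch of what ifconfig eth0 up essentially does. This is an illustrative program of my own, not ifconfig's actual source; the interface name "eth0" is just an example and error handling is cut to the bare minimum:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);   /* any socket works for device ioctls */

    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

    if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)     /* read the current flags */
        return 1;
    ifr.ifr_flags |= IFF_UP | IFF_RUNNING;     /* mark the interface up */
    if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)     /* kernel follows the path above into dev_open() */
        return 1;

    close(fd);
    return 0;
}

SIOCSIFFLAGS is exactly the command seen in the call chain above; it is what steers dev_ifsioc() into dev_change_flags().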
As we can see, this path finally enters dev_open() in the kernel:
int dev_open(struct net_device *dev)
{
    ......
    dev_activate(dev);
    ......
}
Inside it, dev_activate() is called to attach a proper queueing discipline (qdisc) to the NIC:
ioctl()->sock_ioctl()->dev_ioctl()->SIOCSIFFLAGS->dev_ifsioc()->dev_change_flags()->IFF_UP->dev_open()-->dev_activate()
void dev_activate(struct net_device *dev)
{
    /* No queueing discipline is attached to device;
       create default one i.e. pfifo_fast for devices,
       which need queueing and noqueue_qdisc for
       virtual interfaces
     */

    if (dev->qdisc_sleeping == &noop_qdisc) {
        struct Qdisc *qdisc;
        if (dev->tx_queue_len) {
            qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
                                      TC_H_ROOT);
            if (qdisc == NULL) {
                printk(KERN_INFO "%s: activation failed\n", dev->name);
                return;
            }
            list_add_tail(&qdisc->list, &dev->qdisc_list);
        } else {
            qdisc = &noqueue_qdisc;
        }
        dev->qdisc_sleeping = qdisc;
    }

    if (!netif_carrier_ok(dev))
        /* Delay activation until next carrier-on event */
        return;

    spin_lock_bh(&dev->queue_lock);
    rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
    if (dev->qdisc != &noqueue_qdisc) {
        dev->trans_start = jiffies;
        dev_watchdog_up(dev);
    }
    spin_unlock_bh(&dev->queue_lock);
}
In the code above, qdisc_create_dflt() creates a new queueing discipline for the NIC, which is then stored in the device structure dev. This is the point where the qdisc for our cs8900 and dm9000 NICs is formally established. Note that the new discipline is saved in qdisc_sleeping: qdisc_sleeping holds the discipline to be used while the NIC is active, whereas when the NIC is down or the carrier is absent (no cable plugged in), the noqueue_qdisc discipline is used instead, whose enqueue operation, as we saw, simply drops and frees the packet. Let's look at how the default discipline is allocated:
ioctl()->sock_ioctl()->dev_ioctl()->SIOCSIFFLAGS->dev_ifsioc()->dev_change_flags()->IFF_UP->dev_open()-->dev_activate()-->qdisc_create_dflt()
struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
                                unsigned int parentid)
{
    struct Qdisc *sch;

    sch = qdisc_alloc(dev, ops);
    if (IS_ERR(sch))
        goto errout;
    sch->stats_lock = &dev->queue_lock;
    sch->parent = parentid;

    if (!ops->init || ops->init(sch, NULL) == 0)
        return sch;

    qdisc_destroy(sch);
errout:
    return NULL;
}
When dev_activate() calls this function, the struct Qdisc_ops pointer it passes in is &pfifo_fast_ops. struct Qdisc_ops is the structure that describes the operations of a queueing discipline:
struct Qdisc_ops {
    struct Qdisc_ops              *next;
    const struct Qdisc_class_ops  *cl_ops;
    char                          id[IFNAMSIZ];
    int                           priv_size;

    int                           (*enqueue)(struct sk_buff *, struct Qdisc *);
    struct sk_buff *              (*dequeue)(struct Qdisc *);
    int                           (*requeue)(struct sk_buff *, struct Qdisc *);
    unsigned int                  (*drop)(struct Qdisc *);

    int                           (*init)(struct Qdisc *, struct nlattr *arg);
    void                          (*reset)(struct Qdisc *);
    void                          (*destroy)(struct Qdisc *);
    int                           (*change)(struct Qdisc *, struct nlattr *arg);

    int                           (*dump)(struct Qdisc *, struct sk_buff *);
    int                           (*dump_stats)(struct Qdisc *, struct gnet_dump *);

    struct module                 *owner;
};
Most of its members are function pointers, which we won't explain one by one for now. Let's look at the pfifo_fast_ops variable that was passed down:
static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
    .id        = "pfifo_fast",
    .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
    .enqueue   = pfifo_fast_enqueue,
    .dequeue   = pfifo_fast_dequeue,
    .requeue   = pfifo_fast_requeue,
    .init      = pfifo_fast_init,
    .reset     = pfifo_fast_reset,
    .dump      = pfifo_fast_dump,
    .owner     = THIS_MODULE,
};
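pfifo_fast_ops above is one concrete instance of this operations table. As an even simpler illustration of how the callbacks fit together, here is a hypothetical one-band FIFO discipline; the sfifo_* identifiers are invented for this sketch, statistics and requeue handling are omitted, and this is not code from the kernel tree:

#include <linux/module.h>
#include <net/pkt_sched.h>      /* struct Qdisc, struct Qdisc_ops, qdisc_priv(), qdisc_drop() */

static int sfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct sk_buff_head *list = qdisc_priv(sch);   /* our private data is a single list */

    if (skb_queue_len(list) >= sch->dev->tx_queue_len)
        return qdisc_drop(skb, sch);               /* queue full: drop and account */

    __skb_queue_tail(list, skb);
    sch->q.qlen++;
    return NET_XMIT_SUCCESS;
}

static struct sk_buff *sfifo_dequeue(struct Qdisc *sch)
{
    struct sk_buff *skb = __skb_dequeue((struct sk_buff_head *)qdisc_priv(sch));

    if (skb)
        sch->q.qlen--;
    return skb;
}

static int sfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
    skb_queue_head_init((struct sk_buff_head *)qdisc_priv(sch));
    return 0;
}

static void sfifo_reset(struct Qdisc *sch)
{
    __skb_queue_purge((struct sk_buff_head *)qdisc_priv(sch));
    sch->q.qlen = 0;
}

static struct Qdisc_ops sfifo_ops __read_mostly = {
    .id        = "sfifo",
    .priv_size = sizeof(struct sk_buff_head),
    .enqueue   = sfifo_enqueue,
    .dequeue   = sfifo_dequeue,
    .init      = sfifo_init,
    .reset     = sfifo_reset,
    .owner     = THIS_MODULE,
};

A loadable discipline like this would normally be registered with register_qdisc(&sfifo_ops) from its module init function; pfifo_fast skips that step because it is built in and is attached directly by dev_activate(), as we just saw.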
This is the operations structure of a first-in, first-out (FIFO) queue. Back in qdisc_create_dflt() we saw a call to qdisc_alloc():
ioctl()->sock_ioctl()->dev_ioctl()->SIOCSIFFLAGS->dev_ifsioc()->dev_change_flags()->IFF_UP->dev_open()-->dev_activate()-->qdisc_create_dflt()-->qdisc_alloc()
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
    void *p;
    struct Qdisc *sch;
    unsigned int size;
    int err = -ENOBUFS;

    /* ensure that the Qdisc and the private data are 32-byte aligned */
    size = QDISC_ALIGN(sizeof(*sch));
    size += ops->priv_size + (QDISC_ALIGNTO - 1);

    p = kzalloc(size, GFP_KERNEL);
    if (!p)
        goto errout;
    sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
    sch->padded = (char *) sch - (char *) p;

    INIT_LIST_HEAD(&sch->list);
    skb_queue_head_init(&sch->q);
    sch->ops = ops;
    sch->enqueue = ops->enqueue;
    sch->dequeue = ops->dequeue;
    sch->dev = dev;
    dev_hold(dev);
    atomic_set(&sch->refcnt, 1);

    return sch;
errout:
    return ERR_PTR(err);
}
Clearly the enqueue and dequeue pointers of our qdisc are now set to the FIFO operation functions pfifo_fast_enqueue and pfifo_fast_dequeue. The rest of the function is straightforward, so we skip it. Back in qdisc_create_dflt(), the init operation is then invoked; it initializes the three band queues:
ioctl()->sock_ioctl()->dev_ioctl()->SIOCSIFFLAGS->dev_ifsioc()->dev_change_flags()->IFF_UP->dev_open()-->dev_activate()-->qdisc_create_dflt()-->pfifo_fast_init()
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
    int prio;
    struct sk_buff_head *list = qdisc_priv(qdisc);

    for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
        skb_queue_head_init(list + prio);

    return 0;
}
So once the NIC is activated, the qdisc it uses is the FIFO discipline specified by the operations structure above. You may have heard of QoS programming: by calling dev_graft_qdisc() you can create your own queueing discipline and operation functions and graft them onto the device in place of the defaults. We won't look at that function here. Instead, let's return to dev_queue_xmit() and continue. As discussed earlier, sending a packet at this point means invoking the qdisc's enqueue function pointer, so with the FIFO assignments above, the statement rc = q->enqueue(skb, q); ends up calling pfifo_fast_enqueue():
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()-->pfifo_fast_enqueue()
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
    struct sk_buff_head *list = prio2list(skb, qdisc);

    if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
        qdisc->q.qlen++;
        return __qdisc_enqueue_tail(skb, qdisc, list);
    }

    return qdisc_drop(skb, qdisc);
}
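The matching dequeue side is not shown in this walkthrough, but for completeness it scans the PFIFO_FAST_BANDS lists in order, so band 0 is always drained before band 1 and band 1 before band 2. The following is reconstructed from memory of this kernel generation rather than copied from the tree, so verify it against net/sched/sch_generic.c:

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
    int prio;
    struct sk_buff_head *list = qdisc_priv(qdisc);

    /* lower-numbered bands carry higher-priority traffic */
    for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
        if (!skb_queue_empty(list + prio)) {
            qdisc->q.qlen--;
            return __qdisc_dequeue_head(qdisc, list + prio);
        }
    }

    return NULL;
}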
pfifo_fast_enqueue() appends the packet to the tail of the band list selected by the packet's priority; we won't examine it in more detail. Continuing with dev_queue_xmit(), we see:
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()
    if (dev->flags & IFF_UP) {
        int cpu = smp_processor_id(); /* ok because BHs are off */

        if (dev->xmit_lock_owner != cpu) {

            HARD_TX_LOCK(dev, cpu);

            if (!netif_queue_stopped(dev) &&
                !netif_subqueue_stopped(dev, skb)) {
                rc = 0;
                if (!dev_hard_start_xmit(skb, dev)) {
                    HARD_TX_UNLOCK(dev);
                    goto out;
                }
            }
            HARD_TX_UNLOCK(dev);
            if (net_ratelimit())
                printk(KERN_CRIT "Virtual device %s asks to "
                       "queue packet!\n", dev->name);
        } else {
            /* Recursion is detected! It is possible,
             * unfortunately */
            if (net_ratelimit())
                printk(KERN_CRIT "Dead loop on virtual device "
                       "%s, fix it urgently!\n", dev->name);
        }
    }
If the device is up and its queue state indicates it may transmit, we enter dev_hard_start_xmit() to start sending:
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()-->dev_hard_start_xmit()
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    if (likely(!skb->next)) {
        if (!list_empty(&ptype_all))
            dev_queue_xmit_nit(skb, dev);

        if (netif_needs_gso(dev, skb)) {
            if (unlikely(dev_gso_segment(skb)))
                goto out_kfree_skb;
            if (skb->next)
                goto gso;
        }

        return dev->hard_start_xmit(skb, dev);
    }

gso:
    do {
        struct sk_buff *nskb = skb->next;
        int rc;

        skb->next = nskb->next;
        nskb->next = NULL;
        rc = dev->hard_start_xmit(nskb, dev);
        if (unlikely(rc)) {
            nskb->next = skb->next;
            skb->next = nskb;
            return rc;
        }
        if (unlikely((netif_queue_stopped(dev) ||
                      netif_subqueue_stopped(dev, skb)) &&
                     skb->next))
            return NETDEV_TX_BUSY;
    } while (skb->next);

    skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
    kfree_skb(skb);
    return 0;
}
Again we focus only on the transmit path. The key step here is calling the device's dev->hard_start_xmit() to send the queued packet. For our cs8900 and dm9000 examples, each driver installs its own transmit routine in its initialization code: cs8900 with dev->hard_start_xmit = net_send_packet; and dm9000 with ndev->hard_start_xmit = &dm9000_start_xmit;.
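As a reminder of where that function pointer comes from, a 2.6-era driver fills it in when the net_device is created, before register_netdev(). The following probe skeleton is a generic sketch with invented my_nic_* names, not code from either of the two drivers, and with the open/stop handlers and hardware setup omitted:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_nic_priv {                       /* hypothetical private data */
    void __iomem *io_base;
};

static int my_nic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    /* a real driver copies skb->data to the chip and kicks off TX here */
    dev->trans_start = jiffies;
    dev_kfree_skb(skb);                    /* the driver owns the skb once it returns 0 */
    return 0;                              /* i.e. NETDEV_TX_OK */
}

static int my_nic_probe(void)
{
    struct net_device *dev = alloc_etherdev(sizeof(struct my_nic_priv));
    int err;

    if (!dev)
        return -ENOMEM;

    dev->hard_start_xmit = my_nic_xmit;    /* what dev_hard_start_xmit() will call */
    /* dev->open, dev->stop, MAC address and I/O setup omitted */

    err = register_netdev(dev);
    if (err)
        free_netdev(dev);
    return err;
}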
Let's look at the transmit functions of these two drivers, starting with cs8900:
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()-->dev_hard_start_xmit()-->net_send_packet()
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct net_local *lp = netdev_priv(dev);

    if (net_debug > 3) {
        printk("%s: sent %d byte packet of type %x\n",
               dev->name, skb->len,
               (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
    }

    /* keep the upload from being interrupted, since we
       ask the chip to start transmitting before the
       whole packet has been completely uploaded. */

    spin_lock_irq(&lp->lock);
    netif_stop_queue(dev);

    /* initiate a transmit sequence */
    writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
    writeword(dev->base_addr, TX_LEN_PORT, skb->len);

    /* Test to see if the chip has allocated memory for the packet */
    if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
        /*
         * Gasp!  It hasn't. But that shouldn't happen since
         * we're waiting for TxOk, so return 1 and requeue this packet.
         */

        spin_unlock_irq(&lp->lock);
        if (net_debug)
            printk("cs89x0: Tx buffer not free!\n");
        return 1;
    }
    /* Write the contents of the packet */
    writewords(dev->base_addr, TX_FRAME_PORT, skb->data, (skb->len+1) >> 1);
    spin_unlock_irq(&lp->lock);
    lp->stats.tx_bytes += skb->len;
    dev->trans_start = jiffies;
    dev_kfree_skb(skb);

    /*
     * We DO NOT call netif_wake_queue() here.
     * We also DO NOT call netif_start_queue().
     *
     * Either of these would cause another bottom half run through
     * net_send_packet() before this packet has fully gone out.  That causes
     * us to hit the "Gasp!" above and the send is rescheduled.  it runs like
     * a dog.  We just return and wait for the Tx completion interrupt handler
     * to restart the netdevice layer
     */

    return 0;
}
Apart from the hardware-specific register operations, the most important statement above is writewords(dev->base_addr, TX_FRAME_PORT, skb->data, (skb->len+1) >> 1);, which hands the packet data over to the NIC for transmission. Once the data has been handed off, dev_kfree_skb() is called to free the packet. Now let's look at the dm9000 counterpart:
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()-->dev_hard_start_xmit()-->dm9000_start_xmit()
static int dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned long flags;
    board_info_t *db = (board_info_t *) dev->priv;

    dm9000_dbg(db, 3, "%s:\n", __func__);

    if (db->tx_pkt_cnt > 1)
        return 1;

    spin_lock_irqsave(&db->lock, flags);

    /* Move data to DM9000 TX RAM */
    writeb(DM9000_MWCMD, db->io_addr);

    (db->outblk)(db->io_data, skb->data, skb->len);
    dev->stats.tx_bytes += skb->len;

    db->tx_pkt_cnt++;
    /* TX control: First packet immediately send, second packet queue */
    if (db->tx_pkt_cnt == 1) {
        /* Set TX length to DM9000 */
        iow(db, DM9000_TXPLL, skb->len);
        iow(db, DM9000_TXPLH, skb->len >> 8);

        /* Issue TX polling command */
        iow(db, DM9000_TCR, TCR_TXREQ);    /* Cleared after TX complete */

        dev->trans_start = jiffies;        /* save the time stamp */
    } else {
        /* Second packet */
        db->queue_pkt_len = skb->len;
        netif_stop_queue(dev);
    }

    spin_unlock_irqrestore(&db->lock, flags);

    /* free this SKB */
    dev_kfree_skb(skb);

    return 0;
}
The two NICs transmit in different ways, but in the end both simply hand the packet to the hardware. In the dm9000 transmit function above we can see operations on its registers and on-board RAM; the exact sequence differs from chip to chip and board to board, which is precisely why drivers have to be ported. The key transmit statement here is (db->outblk)(db->io_data, skb->data, skb->len);, which calls the output routine selected when the driver was set up:
sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->tcp_connect()-->tcp_transmit_skb()-->ip_queue_xmit()-->ip_local_out()-->dst_output()-->ip_output()-->ip_finish_output()-->ip_finish_output2()-->neigh_resolve_output()-->dev_queue_xmit()-->dev_hard_start_xmit()-->dm9000_start_xmit()-->dm9000_outblk_32bit()
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
    writesl(reg, data, (count + 3) >> 2);
}
As for writewords and writesl, they ultimately come down to the raw I/O accessors such as __raw_writel(), which write the data to the specified hardware I/O address. What remains is for the DM9000 hardware itself to transmit the data on the wire according to the Ethernet protocol. At this point, our connection-request packet has been sent out through the NIC toward the server's NIC.
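Conceptually, writesl() (like the other string I/O helpers) pushes a buffer to a single device register address one word at a time. A rough portable equivalent, shown only for illustration (the real implementation is arch-specific and often hand-written assembly), would be:

#include <linux/io.h>
#include <linux/types.h>

/* illustrative only: what writesl(addr, data, count) boils down to */
static inline void my_writesl(void __iomem *addr, const void *data, int count)
{
    const u32 *buf = data;

    while (count--)
        __raw_writel(*buf++, addr);    /* every 32-bit word goes to the same register */
}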