int dev_queue_xmit(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct Qdisc *q;
    int rc = -ENOMEM;

    /* Handle GSO packets. */
    if (netif_needs_gso(dev, skb))
        goto gso;

    /* If the skb carries a fragment list but the device cannot handle
     * one (no NETIF_F_FRAGLIST), or it has paged fragments but the
     * device lacks scatter/gather support or cannot DMA from high
     * memory, linearize the skb into one contiguous buffer.
     */
    if (skb_shinfo(skb)->frag_list &&
        !(dev->features & NETIF_F_FRAGLIST) &&
        __skb_linearize(skb))
        goto out_kfree_skb;

    if (skb_shinfo(skb)->nr_frags &&
        (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
        __skb_linearize(skb))
        goto out_kfree_skb;

    /* If the checksum has not been filled in yet (CHECKSUM_PARTIAL)
     * and the device cannot checksum this protocol in hardware,
     * compute it here in software.
     */
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        skb_set_transport_header(skb, skb->csum_start -
                                      skb_headroom(skb));
        if (!(dev->features & NETIF_F_GEN_CSUM) &&
            !((dev->features & NETIF_F_IP_CSUM) &&
              skb->protocol == htons(ETH_P_IP)) &&
            !((dev->features & NETIF_F_IPV6_CSUM) &&
              skb->protocol == htons(ETH_P_IPV6)))
            if (skb_checksum_help(skb))
                goto out_kfree_skb;
    }

gso:
    spin_lock_prefetch(&dev->queue_lock);
    .....
    /* The device has a queueing discipline: enqueue the packet here
     * (traffic shaping, packet ordering, etc.). The elided code above
     * fetched q from dev->qdisc without the lock, hence the recheck
     * under queue_lock below.
     */
    if (q->enqueue) {
        /* Grab device queue */
        spin_lock(&dev->queue_lock);
        q = dev->qdisc;
        if (q->enqueue) {
            /* reset queue_mapping to zero */
            skb->queue_mapping = 0;
            rc = q->enqueue(skb, q);
            qdisc_run(dev);
            spin_unlock(&dev->queue_lock);

            rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
            goto out;
        }
        spin_unlock(&dev->queue_lock);
    }

    /* The device has no queue (typical for loopback and tunnel
     * devices), so transmit directly.
     */
    if (dev->flags & IFF_UP) {
        int cpu = smp_processor_id(); /* ok because BHs are off */

        /* Guard against recursive transmission: this CPU must not
         * already hold the xmit lock, or we have a dead loop.
         */
        if (dev->xmit_lock_owner != cpu) {
            HARD_TX_LOCK(dev, cpu);

            if (!netif_queue_stopped(dev) &&
                !netif_subqueue_stopped(dev, skb->queue_mapping)) {
                rc = 0;
                /* Hand the packet to the driver. */
                if (!dev_hard_start_xmit(skb, dev)) {
                    HARD_TX_UNLOCK(dev);
                    goto out;
                }
            }
            HARD_TX_UNLOCK(dev);
            if (net_ratelimit())
                printk(KERN_CRIT "Virtual device %s asks to "
                       "queue packet!\n", dev->name);
        } else {
            if (net_ratelimit())
                printk(KERN_CRIT "Dead loop on virtual device "
                       "%s, fix it urgently!\n", dev->name);
        }
    }
    ....
}
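The two feature tests above (linearization and checksum offload) can be read as predicates on dev->features. A minimal sketch of what they express, using invented helper names (demo_needs_linearize and demo_can_hw_csum exist nowhere in the kernel; the bodies just mirror the conditions shown in dev_queue_xmit):

/* Hypothetical helpers mirroring the tests in dev_queue_xmit above. */
static inline int demo_needs_linearize(struct net_device *dev,
                                       struct sk_buff *skb)
{
    /* Chained skbs, but the device cannot walk a frag_list. */
    if (skb_shinfo(skb)->frag_list && !(dev->features & NETIF_F_FRAGLIST))
        return 1;
    /* Paged fragments, but no scatter/gather or no high-mem DMA. */
    if (skb_shinfo(skb)->nr_frags &&
        (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)))
        return 1;
    return 0;
}

static inline int demo_can_hw_csum(struct net_device *dev,
                                   struct sk_buff *skb)
{
    if (dev->features & NETIF_F_GEN_CSUM)
        return 1;   /* hardware checksums any protocol */
    if ((dev->features & NETIF_F_IP_CSUM) &&
        skb->protocol == htons(ETH_P_IP))
        return 1;   /* hardware checksums IPv4 only */
    if ((dev->features & NETIF_F_IPV6_CSUM) &&
        skb->protocol == htons(ETH_P_IPV6))
        return 1;   /* hardware checksums IPv6 only */
    return 0;       /* caller falls back to skb_checksum_help() */
}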
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    if (likely(!skb->next)) {
        ..............
        /* Invoke the driver's transmit hook. */
        return dev->hard_start_xmit(skb, dev);
    }

out_kfree_skb:
    kfree_skb(skb);
    return 0;
}
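For reference, here is a minimal, hypothetical hard_start_xmit implementation for a virtual device of this kernel generation (demo_xmit and struct demo_priv are invented names). It shows the contract the call above relies on: accept the frame, take ownership of the skb, and return 0 (NETDEV_TX_OK):

struct demo_priv {
    struct net_device_stats stats;  /* per-device counters */
};

static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct demo_priv *priv = netdev_priv(dev);

    /* A real driver would hand the frame to hardware here; a
     * virtual device simply consumes it. */
    priv->stats.tx_packets++;
    priv->stats.tx_bytes += skb->len;
    dev->trans_start = jiffies;     /* feed the TX watchdog */

    /* The skb now belongs to the driver and must be freed by it. */
    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
}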
/* The GRE device is borrowed here to illustrate the device transmit hook. */
static void ipgre_tunnel_setup(struct net_device *dev)
{
    SET_MODULE_OWNER(dev);
    dev->uninit          = ipgre_tunnel_uninit;
    dev->destructor      = free_netdev;
    dev->hard_start_xmit = ipgre_tunnel_xmit;
    dev->get_stats       = ipgre_tunnel_get_stats;
    dev->do_ioctl        = ipgre_tunnel_ioctl;
    dev->change_mtu      = ipgre_tunnel_change_mtu;

    dev->type            = ARPHRD_IPGRE;
    dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
    dev->mtu             = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
    dev->flags           = IFF_NOARP;
    dev->iflink          = 0;
    dev->addr_len        = 4;
}
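Setup callbacks like ipgre_tunnel_setup() are handed to alloc_netdev(), which runs them on the freshly allocated net_device before registration. A hedged sketch of creating such a device (demo_create_gre_dev is an invented name; error handling trimmed, and struct ip_tunnel is assumed to be the private data as in ipgre.c):

static int demo_create_gre_dev(void)
{
    struct net_device *dev;
    int err;

    /* alloc_netdev() invokes ipgre_tunnel_setup() on the new device. */
    dev = alloc_netdev(sizeof(struct ip_tunnel), "gre%d",
                       ipgre_tunnel_setup);
    if (!dev)
        return -ENOMEM;

    err = register_netdev(dev);
    if (err)
        free_netdev(dev);
    return err;
}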
In the implementation of ipgre_tunnel_xmit there is a passage like the one below. When the skb lacks headroom for the tunnel headers, or is cloned or shared, the driver first copies it and then frees the original. The point is that freeing the skb is the driver's responsibility: if we build a packet ourselves and hand it to the xmit path, we do not free it afterwards (see the sketch after the snippet).
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
    /* Too little headroom for the tunnel headers, or the data is
     * shared: switch to a private copy with enough headroom. */
    struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
    if (!new_skb) {
        ip_rt_put(rt);
        stats->tx_dropped++;
        dev_kfree_skb(skb);     /* drop: the driver frees the skb */
        tunnel->recursion--;
        return 0;
    }
    if (skb->sk)
        skb_set_owner_w(new_skb, skb->sk);  /* keep socket accounting */
    dev_kfree_skb(skb);         /* free the original, continue with the copy */
    skb = new_skb;
}
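The practical consequence: a packet we build ourselves is handed off once and never touched again. A minimal sketch (demo_send_frame is an invented name, and the buffer is assumed to be a complete link-layer frame carrying IPv4):

static int demo_send_frame(struct net_device *dev, const void *frame, int len)
{
    struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

    if (!skb)
        return -ENOMEM;

    memcpy(skb_put(skb, len), frame, len);  /* complete L2 frame */
    skb->dev = dev;
    skb->protocol = htons(ETH_P_IP);        /* assumption: IPv4 */

    /* Ownership passes to the stack/driver here: no kfree_skb()
     * afterwards, whatever the return code. */
    return dev_queue_xmit(skb);
}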