The per-CPU variable is declared as DECLARE_PER_CPU(struct softnet_data, softnet_data); the corresponding definition macro, DEFINE_PER_CPU, is found in include/linux/percpu.h:
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
This macro takes effect on SMP (multi-CPU) kernels. When a per_cpu__##name variable is created on an SMP system, every processor gets its own copy of that variable. The benefit is that access to a per-CPU variable needs no locking, because each processor only ever touches its own copy (a short usage sketch follows the net_dev_init() listing below). softnet_data is set up during system initialization in the net_dev_init() function in net/core/dev.c, shown here:
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_sysfs_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
		INIT_HLIST_HEAD(&dev_name_head[i]);

	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
		INIT_HLIST_HEAD(&dev_index_head[i]);

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = weight_p;
		queue->backlog_dev.poll = process_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

	netdev_dma_register();

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}
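Note the per_cpu(softnet_data, i) accessor in the initialization loop above. The fragment below is a minimal sketch of how such a per-CPU variable is defined and accessed without locks; the rx_hits counter and the two helper functions are made up purely for illustration, while DEFINE_PER_CPU, get_cpu_var()/put_cpu_var(), per_cpu() and for_each_possible_cpu() are the real kernel interfaces used above.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter, defined the same way softnet_data is. */
static DEFINE_PER_CPU(unsigned long, rx_hits);

static void count_rx_on_this_cpu(void)
{
	/* get_cpu_var() disables preemption and yields this CPU's copy;
	 * no spinlock is needed because no other CPU touches this copy. */
	get_cpu_var(rx_hits)++;
	put_cpu_var(rx_hits);
}

static unsigned long total_rx_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu(var, cpu) reads a specific CPU's copy, exactly as
	 * net_dev_init() does with per_cpu(softnet_data, i). */
	for_each_possible_cpu(cpu)
		sum += per_cpu(rx_hits, cpu);
	return sum;
}

Summing the copies, as total_rx_hits() does, is the usual way to obtain a system-wide value, since no single copy holds a global count.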
When a packet arrives, an skb is allocated for it and the received data is chained onto queue->input_pkt_queue. Later, when softirqs are processed, do_softirq() dispatches to the network receive softirq handler net_rx_action(), which invokes queue->backlog_dev.poll, i.e. the process_backlog() function set up in net_dev_init() above. process_backlog() drains queue->input_pkt_queue and hands the packets up to the higher-layer protocols, such as IP at the network layer.
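The sketch below shows, in simplified form, what process_backlog() does. It is modeled on the 2.6.24 implementation (where the poll routine takes a struct napi_struct and a quota) and omits the jiffies/quota bookkeeping and device reference counting of the real function.

/* Simplified sketch of the backlog poll routine (modeled on 2.6.24);
 * quota/jiffies accounting and dev refcounting are omitted. */
static int process_backlog_sketch(struct napi_struct *napi, int quota)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	int work = 0;

	while (work < quota) {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			/* Queue drained: remove ourselves from the per-CPU
			 * poll list and clear the SCHED state. */
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		/* Hand the packet to the protocol layers (e.g. ip_rcv()). */
		netif_receive_skb(skb);
		work++;
	}

	return work;
}

Note that the net_dev_init() listing above comes from a slightly older kernel, where the backlog device is still a struct net_device named backlog_dev; from 2.6.24 onwards softnet_data instead embeds a struct napi_struct called backlog, which is what the poll routine above receives. That structure is defined in include/linux/netdevice.h: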
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};
On the receive path the call chain is net_rx() > netif_rx() > napi_schedule(&queue->backlog) > __napi_schedule(). Inside __napi_schedule(struct napi_struct *n) you will find this line:

list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);

The n here is the napi_struct above, and its (*poll)(struct napi_struct *, int) function pointer was initialized to process_backlog in net_dev_init() (queue->backlog_dev.poll = process_backlog;). process_backlog() in turn ends up calling netif_receive_skb(), which searches the ptype_base[] array (analysed in detail at http://blog.chinaunix.net/u3/105305/showart_2119511.html). This array holds the handler information for each network-layer protocol, for example the ip_rcv() function for the IP protocol.
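To make the ptype_base[] lookup concrete, the fragment below shows how IPv4 registers its handler. The packet_type initializer is taken from net/ipv4/af_inet.c (2.6.24, with the GSO callbacks left out), and the inet_init() body here is heavily trimmed to the single call that matters for this discussion.

/* net/ipv4/af_inet.c (trimmed): IPv4 describes itself with a
 * struct packet_type whose .func is ip_rcv() ... */
static struct packet_type ip_packet_type = {
	.type = __constant_htons(ETH_P_IP),
	.func = ip_rcv,
};

static int __init inet_init(void)
{
	/* ... protocol/socket initialization elided ... */

	/* dev_add_pack() hashes the entry into ptype_base[]
	 * using ntohs(pt->type) & 15. */
	dev_add_pack(&ip_packet_type);
	return 0;
}

netif_receive_skb() performs the reverse lookup: it hashes skb->protocol into ptype_base[], walks the chain of registered packet_type entries, and invokes pt->func, which for an IPv4 frame is ip_rcv().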
The above is based on the blog post: http://blog.chinaunix.net/u2/64681/showart.php?id=1432417