linux kernel 工程师
全部博文(99)
分类: LINUX
2014-02-10 10:40:22
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
* to the per-cpu poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list; /* links this instance into the per-cpu softnet_data poll_list */
unsigned long state; /* NAPI state bits (e.g. NAPI_STATE_SCHED) */
int weight; /* work budget handed to ->poll() per invocation */
int (*poll)(struct napi_struct *, int); /* driver poll callback: (napi, budget) */
#ifdef CONFIG_NETPOLL
spinlock_t poll_lock; /* serializes netpoll access to ->poll() */
int poll_owner; /* cpu currently inside ->poll() under netpoll, -1 if none */
#endif
unsigned int gro_count; /* number of skbs currently held on gro_list */
struct net_device *dev; /* owning network device */
struct list_head dev_list; /* links this instance into dev->napi_list */
struct sk_buff *gro_list; /* chain of skbs being aggregated by GRO */
struct sk_buff *skb; /* skb under construction (used by napi_get_frags) — NOTE(review): confirm against napi_frags usage */
};
/**
 * netif_napi_add - initialize a NAPI context and attach it to a device
 * @dev: network device the NAPI instance belongs to
 * @napi: NAPI context to initialize
 * @poll: poll callback invoked from the softirq handler
 * @weight: per-poll work budget passed to @poll
 *
 * Initializes @napi, links it onto @dev->napi_list, and leaves the
 * context with NAPI_STATE_SCHED set so it cannot be scheduled until
 * the driver enables it (typically via napi_enable()).
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	/* poll_list starts self-linked (empty); napi_schedule() later
	 * splices it onto the per-cpu softnet_data poll_list. */
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;	/* fix: original paste was missing the semicolon */
	napi->poll = poll;
	napi->weight = weight;
	/* Register this NAPI instance with its owning device. */
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;	/* no cpu owns the poll yet */
#endif
	/* Start with SCHED set: the instance is not schedulable until
	 * napi_enable() clears this bit. */
	set_bit(NAPI_STATE_SCHED, &napi->state);
}