linux kernel 工程师
全部博文(99)
分类: LINUX
2014-02-11 12:00:56
netif_rx 相比 NAPI 是旧的(非 NAPI)接口, 在硬中断上下文中调用。它把 skb 放入当前(或 RPS 选定) CPU 的 softnet_data->input_pkt_queue 中, 队列为空时再调度 backlog 伪 NAPI 触发软中断处理。
非 NAPI 方式下, 一次中断只收一个包。
/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * Legacy (non-NAPI) receive path, called from hard-IRQ context, one skb
 * per interrupt. The skb is appended to a per-CPU backlog queue
 * (softnet_data.input_pkt_queue) and processed later in softirq context.
 *
 * return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
return NET_RX_DROP;
/* NOTE(review): non-upstream LTTng tracepoint — presumably mirrors
 * trace_netif_rx below for LTTng consumers; confirm against the patch
 * that added it. */
trace_lttng_net_dev_receive(skb);
net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
/* Disable preemption and enter an RCU read section so the RPS
 * flow table and the chosen CPU stay stable while we enqueue. */
preempt_disable();
rcu_read_lock();
/* Let RPS pick a target CPU; fall back to the local CPU when no
 * mapping applies (get_rps_cpu returns < 0). */
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu < 0)
cpu = smp_processor_id();
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
preempt_enable();
} else
#endif
{
unsigned int qtail;
/* No RPS: enqueue onto this CPU's per_cpu(softnet_data, cpu)
 * ->input_pkt_queue. get_cpu()/put_cpu() pin us to one CPU for
 * the duration of the enqueue. */
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
return ret;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 *
 * Runs with local IRQs disabled and the per-CPU rps lock held while it
 * touches sd->input_pkt_queue. Returns NET_RX_SUCCESS when the skb was
 * queued, NET_RX_DROP when the backlog is full or the device rx_hook
 * consumed the packet.
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);
	rps_lock(sd);

	/* Give the device's rx hook (vendor extension) first look at the
	 * packet; it may consume it. Run it exactly ONCE per skb — the
	 * original code invoked it a second time on the empty-queue path,
	 * double-processing the same skb.
	 */
	if (unlikely(skb->dev->rx_hook != NULL)) {
		int ret;

		ret = skb->dev->rx_hook(skb);
		if (ret == NET_RX_DROP)
			goto drop;
	}

	/* Backlog limit defaults to 1000 and is tunable via
	 * /proc/sys/net/core/netdev_max_backlog (the old comment wrongly
	 * pointed at netdev_budget).
	 */
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			/* Queue non-empty: backlog NAPI is already scheduled,
			 * so just append the skb to the tail.
			 */
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Queue was empty: schedule NAPI for the backlog device.
		 * We can use a non-atomic test-and-set since we own the queue
		 * lock. sd->backlog is a pseudo-NAPI context that lets
		 * non-NAPI drivers share the NAPI softirq processing path.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		/* After scheduling, fall through to the tail-enqueue above. */
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);
	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}