0000001f 00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000
00000016 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
00000018 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
00001143 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
Explanation:
Four rows mean the machine has four CPUs.
The first column is the total number of packets received by that CPU (some kernels report this as packets received per second).
The second column is the number of packets dropped when that CPU's default backlog queue was full (some kernels report this as packets dropped per second).
(Packets dropped by NAPI adapters because the RX ring was full are not counted here; those are counted in the NIC driver.)
The third column is time_squeeze: one softirq run could not finish processing all of the packets already received, so another round of the softirq had to be scheduled; time_squeeze counts how many times that happened.
The last column is the number of CPU collisions (cpu_collision).
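The counters are printed in hex, so for quick reading it helps to convert them per CPU. Below is a minimal userspace sketch (my own illustration, not part of the kernel source quoted here) that parses /proc/net/softnet_stat assuming the nine-column format produced by softnet_seq_show() below:

/* Minimal sketch: parse /proc/net/softnet_stat and print the
 * total / dropped / time_squeeze / cpu_collision columns in decimal.
 * Assumes the 9-column layout of softnet_seq_show() shown below.
 */
#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/softnet_stat", "r");
	unsigned int col[9];
	int cpu = 0;

	if (!fp) {
		perror("fopen");
		return 1;
	}

	while (fscanf(fp, "%x %x %x %x %x %x %x %x %x",
		      &col[0], &col[1], &col[2], &col[3], &col[4],
		      &col[5], &col[6], &col[7], &col[8]) == 9) {
		printf("cpu%d: total=%u dropped=%u time_squeeze=%u cpu_collision=%u\n",
		       cpu++, col[0], col[1], col[2], col[8]);
	}

	fclose(fp);
	return 0;
}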
Kernel code:
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}
include/linux/netdevice.h:

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};
int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.off_sec)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);

	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
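As the code shows, once input_pkt_queue.qlen exceeds netdev_max_backlog, netif_rx() frees the skb and bumps the per-CPU dropped counter (the second column above). If that column keeps climbing, the backlog limit can be inspected through the net.core.netdev_max_backlog sysctl. A minimal sketch of my own, assuming the standard procfs path:

/* Minimal sketch: read the current netdev_max_backlog limit.
 * Assumes the standard sysctl path /proc/sys/net/core/netdev_max_backlog;
 * raising it (e.g. `sysctl -w net.core.netdev_max_backlog=2000`) gives
 * netif_rx() a deeper per-CPU input_pkt_queue before it starts dropping.
 */
#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/sys/net/core/netdev_max_backlog", "r");
	int backlog;

	if (!fp || fscanf(fp, "%d", &backlog) != 1) {
		perror("netdev_max_backlog");
		return 1;
	}
	printf("netdev_max_backlog = %d\n", backlog);
	fclose(fp);
	return 0;
}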