分类: LINUX
2011-11-04 18:21:22
Cpu(s): 0.0% user, 0.5% system, 0.0% nice, 50.3% idle
Cpu0 : 1.0% user, 0.0% system, 0.0% nice, 1.0% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 50.8% idle
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 1.0% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle
Cpu(s): 0.5% user, 0.0% system, 0.0% nice, 50.8% idle
Cpu0 : 0.0% user, 1.0% system, 0.0% nice, 2.0% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle
[root@SkyNet ~]# cat /proc/interrupts
CPU0 CPU1
74: 154789 1 PCI-MSI eth1
82: 16393 2102221 PCI-MSI eth2
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 38.5% idle
Cpu0 : 1.0% user, 1.0% system, 0.0% nice, 2.0% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 73.7% idle
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 37.2% idle
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 2.1% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 72.4% idle
Cpu(s): 0.5% user, 0.5% system, 0.0% nice, 38.2% idle
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 3.0% idle
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 73.7% idle
“CPU1此时只分担到了发送数据帧的中断工作;而网络内核栈的工作——从net_rx_action开始,包括网桥、Netfilter、队列调度等等——全部集中到了CPU0上。也就是说,网络栈的工作并没有实现负载均衡。换句话说,net_rx_action这个软中断只在一个CPU上运行,并没有实现多个CPU的同时运行和调度(通过后面的实验和ShadowStar同学的指点,最后这一句结论是错的,我最后会说明)”
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 78.6% idle, 0.0% hi, 2.1% si
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 57.0% idle, 0.0% hi, 5.4% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle, 0.0% hi, 0.0% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 78.1% idle, 0.0% hi, 2.7% si
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 55.3% idle, 0.0% hi, 5.3% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle, 0.0% hi, 0.0% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 80.1% idle, 0.0% hi, 2.2% si
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 60.6% idle, 0.0% hi, 4.3% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle, 0.0% hi, 0.0% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 59.0% idle, 0.0% hi, 0.5% si
Cpu0 : 0.0% user, 1.1% system, 0.0% nice, 98.9% idle, 0.0% hi, 0.0% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 18.1% idle, 0.0% hi, 1.1% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 59.6% idle, 0.0% hi, 0.5% si
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle, 0.0% hi, 0.0% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 18.1% idle, 0.0% hi, 1.1% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 59.6% idle, 0.0% hi, 0.5% si
Cpu0 : 0.0% user, 0.0% system, 0.0% nice, 100.0% idle, 0.0% hi, 0.0% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 20.2% idle, 0.0% hi, 1.1% si
/*
 * net_rx_action - NET_RX_SOFTIRQ handler (2.6.x-era kernel, pre-napi_struct).
 *
 * Drains this CPU's per-CPU softnet_data poll list by calling each queued
 * device's ->poll() method. Because a softirq runs on the CPU that raised
 * it, all of this polling work stays on the CPU that took the RX interrupt.
 *
 * NOTE(review): this is a truncated excerpt of the real kernel function:
 * `budget` is used below but its declaration is not shown, the check that
 * jumps to the `softnet_break` label is missing (the label is unreachable
 * as shown), and the local_irq_disable() that pairs with local_irq_enable()
 * at `out:` is also outside this view — compare against the full source.
 */
static void net_rx_action(struct softirq_action *h)
{
/* Per-CPU input queue for this CPU's softirq context. */
struct softnet_data *queue = &__get_cpu_var(softnet_data);
while (!list_empty(&queue->poll_list)) {
struct net_device *dev;
dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
netpoll_poll_lock(dev);
/* Device is out of quota, or its ->poll() returned non-zero
 * (more work pending): rotate it to the tail of the list and
 * replenish its quota so other devices get a fair turn. */
if (dev->quota <= 0 || dev->poll(dev, &budget)) {
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
/* A negative quota means the device overdrew last round;
 * carry the debt forward instead of a full refill. */
if (dev->quota < 0)
dev->quota += dev->weight;
else
dev->quota = dev->weight;
} else {
/* ->poll() finished all work; the completion/dequeue path
 * is omitted from this excerpt. */
}
}
out:
local_irq_enable();
return;
/* Time slice or packet budget exhausted: record the squeeze and
 * re-raise NET_RX_SOFTIRQ so the rest is handled on the next pass. */
softnet_break:
__get_cpu_var(netdev_rx_stat).time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
/*
 * netif_rx_schedule - arrange for a device to be RX-polled.
 *
 * netif_rx_schedule_prep() atomically tests and claims the device's
 * RX-scheduling state; only the caller that wins that test proceeds
 * to queue the device for polling, so a device is never enqueued twice.
 */
static inline void netif_rx_schedule(struct net_device *dev)
{
	if (!netif_rx_schedule_prep(dev))
		return;

	__netif_rx_schedule(dev);
}
/*
 * __netif_rx_schedule - queue @dev on the current CPU's poll list and
 * raise NET_RX_SOFTIRQ, all with local interrupts disabled.
 *
 * The device lands on the poll list of the CPU executing this function
 * (i.e. the CPU that took the RX interrupt) — this is why the subsequent
 * net_rx_action() work runs on that same CPU.
 */
static inline void __netif_rx_schedule(struct net_device *dev)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	/* Hold a reference while the device sits on the poll list. */
	dev_hold(dev);
	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
	/* Replenish quota; a negative value is debt carried from the
	 * previous round, so add the weight rather than resetting. */
	dev->quota = (dev->quota < 0) ? dev->quota + dev->weight
				      : dev->weight;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(irq_flags);
}
Cpu(s): 0.5% user, 0.5% system, 0.0% nice, 16.6% idle, 0.0% hi, 1.6% si
Cpu0 : 1.1% user, 0.0% system, 0.0% nice, 11.6% idle, 0.0% hi, 0.0% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 21.9% idle, 0.0% hi, 2.1% si
Cpu(s): 0.0% user, 0.0% system, 0.0% nice, 16.8% idle, 0.0% hi, 2.1% si
Cpu0 : 0.0% user, 1.1% system, 0.0% nice, 10.5% idle, 0.0% hi, 2.1% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 22.1% idle, 0.0% hi, 2.1% si
Cpu(s): 0.5% user, 0.5% system, 0.0% nice, 15.1% idle, 0.0% hi, 2.1% si
Cpu0 : 1.0% user, 0.0% system, 0.0% nice, 11.5% idle, 0.0% hi, 1.0% si
Cpu1 : 0.0% user, 0.0% system, 0.0% nice, 19.8% idle, 0.0% hi, 3.1% si