Category: LINUX
2014-05-15 15:59:28
Original post: 内核通知链原理及机制 ("Kernel Notification Chains: Principles and Mechanism"), by linuxDOS
/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */
static int notifier_chain_register(struct notifier_block **nl,
                                   struct notifier_block *n)
{
    while ((*nl) != NULL) {
        if (n->priority > (*nl)->priority)
            break;
        nl = &((*nl)->next);
    }
    n->next = *nl;
    rcu_assign_pointer(*nl, n);
    return 0;
}
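The core routine above keeps the singly linked chain sorted by descending priority and publishes the new node with rcu_assign_pointer(), so readers walking the chain under RCU always see it in a consistent state. Subsystems never call it directly; the exported wrappers (blocking, atomic, raw) layer the appropriate locking on top. As a minimal sketch of how a chain gets defined and a notifier registered on it (the demo_* names are hypothetical, not from the original post):

#include <linux/module.h>
#include <linux/notifier.h>

/* A chain head of the blocking flavour; its register wrapper takes the
 * chain's rwsem and then calls notifier_chain_register() shown above. */
static BLOCKING_NOTIFIER_HEAD(demo_chain);

/* Callback run for every event published on demo_chain. */
static int demo_event_handler(struct notifier_block *nb,
                              unsigned long event, void *data)
{
    pr_info("demo notifier: event %lu\n", event);
    return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
    .notifier_call = demo_event_handler,
    .priority      = 0,    /* higher priority sorts earlier in the chain */
};

static int __init demo_init(void)
{
    return blocking_notifier_chain_register(&demo_chain, &demo_nb);
}
module_init(demo_init);
MODULE_LICENSE("GPL");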
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 * @nl:         Pointer to head of the blocking notifier chain
 * @val:        Value passed unmodified to notifier function
 * @v:          Pointer passed unmodified to notifier function
 * @nr_to_call: Number of notifier functions to be called. Don't care
 *              value of this parameter is -1.
 * @nr_calls:   Records the number of notifications sent. Don't care
 *              value of this field is NULL.
 * @returns:    notifier_call_chain returns the value returned by the
 *              last notifier function called.
 */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
                                         unsigned long val, void *v,
                                         int nr_to_call, int *nr_calls)
{
    int ret = NOTIFY_DONE;
    struct notifier_block *nb, *next_nb;

    nb = rcu_dereference_raw(*nl);

    while (nb && nr_to_call) {
        next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
        if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
            WARN(1, "Invalid notifier called!");
            nb = next_nb;
            continue;
        }
#endif
        ret = nb->notifier_call(nb, val, v);

        if (nr_calls)
            (*nr_calls)++;

        if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
            break;
        nb = next_nb;
        nr_to_call--;
    }
    return ret;
}
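Delivery is the mirror image of registration: notifier_call_chain() walks the list in priority order, optionally counts how many callbacks ran, and stops early as soon as a callback returns a value with NOTIFY_STOP_MASK set. Continuing the hypothetical demo_chain sketch, publishing an event looks roughly like this:

/* Raise an event on demo_chain; val and payload reach every callback
 * unmodified, exactly as the walker above passes them through. */
static void demo_report_event(unsigned long val, void *payload)
{
    int ret = blocking_notifier_call_chain(&demo_chain, val, payload);

    if (ret & NOTIFY_STOP_MASK)
        pr_info("a notifier stopped the chain early\n");
}

The remaining code follows one concrete producer of such events: the link-state (linkwatch) path that starts when a driver reports carrier.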
/**
 * netif_carrier_on - set carrier
 * @dev: network device
 *
 * Device has detected that carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
    if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
        if (dev->reg_state == NETREG_UNINITIALIZED)
            return;
        linkwatch_fire_event(dev);
        if (netif_running(dev))
            __netdev_watchdog_up(dev);
    }
}
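netif_carrier_on() is the usual entry point from a network driver: the test_and_clear_bit() makes the call idempotent, and instead of notifying the stack synchronously it only fires a linkwatch event. A hedged sketch of the common driver pattern (the handler name is hypothetical), assuming the PHY interrupt or poll routine reports the new link state:

/* Announce link transitions to the stack via the carrier helpers and
 * let the linkwatch machinery below fan the event out asynchronously. */
static void demo_phy_link_change(struct net_device *dev, bool link_up)
{
    if (link_up)
        netif_carrier_on(dev);     /* queues a linkwatch "up" event */
    else
        netif_carrier_off(dev);    /* queues a linkwatch "down" event */
}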
static void linkwatch_add_event(struct net_device *dev)
{
    unsigned long flags;

    spin_lock_irqsave(&lweventlist_lock, flags);
    if (list_empty(&dev->link_watch_list)) {
        list_add_tail(&dev->link_watch_list, &lweventlist);
        dev_hold(dev);
    }
    spin_unlock_irqrestore(&lweventlist_lock, flags);
}
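linkwatch_add_event() is reached from linkwatch_fire_event(), which netif_carrier_on() called above. The list_empty() check queues each device on lweventlist at most once, and dev_hold() pins the device so it cannot be freed while its event is still pending; the matching dev_put() happens in linkwatch_do_dev() once the event is processed.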
static void linkwatch_schedule_work(int urgent)
{
    unsigned long delay = linkwatch_nextevent - jiffies;

    if (test_bit(LW_URGENT, &linkwatch_flags))
        return;

    /* Minimise down-time: drop delay for up event. */
    if (urgent) {
        if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
            return;
        delay = 0;
    }

    /* If we wrap around we'll delay it by at most HZ. */
    if (delay > HZ)
        delay = 0;

    /*
     * This is true if we've scheduled it immediately or if we don't
     * need an immediate execution and it's already pending.
     */
    if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
        return;

    /* Don't bother if there is nothing urgent. */
    if (!test_bit(LW_URGENT, &linkwatch_flags))
        return;

    /* It's already running which is good enough. */
    if (!__cancel_delayed_work(&linkwatch_work))
        return;

    /* Otherwise we reschedule it again for immediate execution. */
    schedule_delayed_work(&linkwatch_work, 0);
}
static void __linkwatch_run_queue(int urgent_only)
{
    struct net_device *dev;
    LIST_HEAD(wrk);

    /*
     * Limit the number of linkwatch events to one
     * per second so that a runaway driver does not
     * cause a storm of messages on the netlink
     * socket.  This limit does not apply to up events
     * while the device qdisc is down.
     */
    if (!urgent_only)
        linkwatch_nextevent = jiffies + HZ;
    /* Limit wrap-around effect on delay. */
    else if (time_after(linkwatch_nextevent, jiffies + HZ))
        linkwatch_nextevent = jiffies;

    clear_bit(LW_URGENT, &linkwatch_flags);

    spin_lock_irq(&lweventlist_lock);
    list_splice_init(&lweventlist, &wrk);

    while (!list_empty(&wrk)) {

        dev = list_first_entry(&wrk, struct net_device, link_watch_list);
        list_del_init(&dev->link_watch_list);

        if (urgent_only && !linkwatch_urgent_event(dev)) {
            list_add_tail(&dev->link_watch_list, &lweventlist);
            continue;
        }
        spin_unlock_irq(&lweventlist_lock);
        linkwatch_do_dev(dev);
        spin_lock_irq(&lweventlist_lock);
    }

    if (!list_empty(&lweventlist))
        linkwatch_schedule_work(0);
    spin_unlock_irq(&lweventlist_lock);
}
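The work function drains lweventlist under the spinlock, rate-limits non-urgent processing to once per second, and hands each device to linkwatch_do_dev(), which ultimately raises NETDEV_CHANGE on the netdev notifier chain via netdev_state_change(). Any subsystem can therefore observe carrier flips with an ordinary netdevice notifier; a hedged sketch (handler names hypothetical, assuming a kernel recent enough to have netdev_notifier_info_to_dev()):

#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);

    /* NETDEV_CHANGE is what linkwatch raises via netdev_state_change(). */
    if (event == NETDEV_CHANGE)
        pr_info("%s: carrier is now %s\n", dev->name,
                netif_carrier_ok(dev) ? "on" : "off");

    return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_nb = {
    .notifier_call = demo_netdev_event,
};

/* Registered somewhere in init code:
 *     register_netdevice_notifier(&demo_netdev_nb);
 */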