此生既入苦寒山,何妨再攀险峰!
分类:
2011-09-20 10:38:34
原文地址:linux 2.6.23时钟中断与调度分析(2) 作者:KYlinux
/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_IO_APIC
	if (timer_ack) {
		/*
		 * Subtle, when I/O APICs are used we have to ack timer IRQ
		 * manually to reset the IRR bit for do_slow_gettimeoffset().
		 * This will also deassert NMI lines for the watchdog if run
		 * on an 82489DX-based system.
		 */
		spin_lock(&i8259A_lock);
		/* OCW3 poll command: acknowledge the IRQ at the master PIC;
		   AEOI mode ends the interrupt automatically. */
		outb(0x0c, PIC_MASTER_OCW3);
		/* Ack the IRQ; AEOI will end it automatically. */
		inb(PIC_MASTER_POLL);
		spin_unlock(&i8259A_lock);
	}
#endif

	/* Architecture hook that performs the generic per-tick work
	   (do_timer(), update_process_times(), ...). */
	do_timer_interrupt_hook();

	if (MCA_bus) {
		/* The PS/2 uses level-triggered interrupts.  You can't
		   turn them off, nor would you want to (any attempt to
		   enable edge-triggered interrupts usually gets intercepted
		   by a special hardware circuit).  Hence we have to
		   acknowledge the timer interrupt.  Through some incredibly
		   stupid design idea, the reset for IRQ 0 is done by setting
		   the high bit of the PPI port B (0x61).  Note that some
		   PS/2s, notably the 55SX, work fine if this is removed. */

		u8 irq_v = inb_p( 0x61 );	/* read the current state */
		outb_p( irq_v|0x80, 0x61 );	/* reset the IRQ */
	}

	return IRQ_HANDLED;
}
初始化dev->event_handler函数的过程: clockevents_register_device() > clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev) > raw_notifier_call_chain(&clockevents_chain, reason, dev) > __raw_notifier_call_chain(nh, val, v, -1, NULL) > notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls) : ... ret = nb->notifier_call(nb, val, v); /* 注意这个参数v,指向clockevents_register_device * 的参数clock_event_device *global_clock_event; * 而参数nb,就是&clockevents_chain */ 然而,这个notifier_call函数指针的赋值在start_kernel() > tick_init() : clockevents_register_notifier(&tick_notifier); 而tick_notifier的定义: static struct notifier_block tick_notifier = { .notifier_call = tick_notify, }; 还没有完呢, tick_notify > tick_check_new_device() > tick_setup_device() > tick_setup_periodic() > tick_set_periodic_handler() 这个dev->event_handler()处理函数终于初始化了
/*
 * Event handler for the periodic tick: performs the per-tick bookkeeping
 * and, for event devices that are running in one-shot mode, programs the
 * next tick event by hand.
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	/* Devices in true periodic mode retrigger themselves in hardware;
	   nothing more to do. */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/* Programming failed, i.e. 'next' was already in the past:
		   account the missed tick and retry one period later. */
		tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * tick_periodic() performs the classic per-tick update work, including
 * updating the running task's time slice etc. (this is the blog author's
 * note: "a series of the old update operations").
 */
static void tick_periodic(int cpu)
{
	/* Only the CPU that owns the global tick advances jiffies/xtime,
	   under the xtime seqlock. */
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	/* Update clock-related accounting for the currently running task. */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
/*
 * Run all expired timers on this base. NOTE: the original blog comment
 * below was missing its closing marker, which would have commented out the
 * while-loop; the terminator is restored here.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	/*
	 * Expiry loop: compare the global jiffies against the base's own
	 * bookkeeping jiffies. If the former is larger, some timers need
	 * handling; otherwise no timer has expired yet. Because a CPU may
	 * have interrupts disabled (losing tick events), jiffies can be
	 * ahead of base->timer_jiffies by more than one, so we loop.
	 */
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		/* Local list onto which this bucket's timers are moved. */
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		/* When index == 0, tv1 has wrapped a full cycle: refill tv1
		   from tv2; if that tv2 bucket is empty, refill tv2 from
		   tv3, and so on up the wheel. */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		/* Advance the base's bookkeeping jiffies. */
		++base->timer_jiffies;
		/* Move base->tv1.vec[index] onto work_list, leaving the
		   bucket empty. */
		list_replace_init(base->tv1.vec + index, &work_list);
		/*
		 * Every timer chained on this bucket has expired: call each
		 * timer's handler and detach it, until the list is empty.
		 */
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			/* Take the first timer, remember its handler, and
			   detach it from the list. */
			timer = list_first_entry(head, struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			/* Drop the lock while running the callback so it may
			   itself add/modify timers. */
			spin_unlock_irq(&base->lock);
			{
				/* Sanity check: a timer callback must not
				   return with an unbalanced preempt count. */
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
/*
 * Per-tick scheduler hook: updates the runqueue clock and CPU load, and
 * gives the current task's scheduling class a chance to preempt/account
 * (task_tick). On SMP it also records idle state and kicks load balancing.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;
	/* The earliest clock value this tick is allowed to report. */
	u64 next_tick = rq->tick_timestamp + TICK_NSEC;

	spin_lock(&rq->lock);
	__update_rq_clock(rq);
	/*
	 * Let rq->clock advance by at least TICK_NSEC:
	 */
	if (unlikely(rq->clock < next_tick))
		rq->clock = next_tick;
	rq->tick_timestamp = rq->clock;
	update_cpu_load(rq);
	/* The idle task has no scheduling-class tick work to do. */
	if (curr != rq->idle) /* FIXME: needed? */
		curr->sched_class->task_tick(rq, curr);
	spin_unlock(&rq->lock);

#ifdef CONFIG_SMP
	rq->idle_at_tick = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
}