When a timer interrupt occurs, the time slice of the interrupted process is updated. If the time slice has been used up, a process switch should be triggered, which is exactly the "rescheduling activity" you are referring to.

The code path is as follows:

timer_interrupt() ---> do_timer_interrupt() ---> do_timer_interrupt_hook() ---> update_process_times() ---> scheduler_tick()
void scheduler_tick(void)
{
    int cpu = smp_processor_id();
    runqueue_t *rq = this_rq();
    task_t *p = current;
    unsigned long long now = sched_clock();

    update_cpu_clock(p, rq, now);

    rq->timestamp_last_tick = now;

    if (p == rq->idle) {
        if (wake_priority_sleeper(rq))
            goto out;
        rebalance_tick(cpu, rq, SCHED_IDLE);
        return;
    }

    /* Task might have expired already, but not scheduled off yet */
    if (p->array != rq->active) {
        set_tsk_need_resched(p);
        goto out;
    }
    spin_lock(&rq->lock);
    /*
     * The task was running during this tick - update the
     * time slice counter. Note: we do not update a thread's
     * priority until it either goes to sleep or uses up its
     * timeslice. This makes it possible for interactive tasks
     * to use up their timeslices at their highest priority levels.
     */
    if (rt_task(p)) {
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if ((p->policy == SCHED_RR) && !--p->time_slice) {
            p->time_slice = task_timeslice(p);
            p->first_time_slice = 0;
            set_tsk_need_resched(p);

            /* put it at the end of the queue: */
            requeue_task(p, rq->active);
        }
        goto out_unlock;
    }
    if (!--p->time_slice) {
        dequeue_task(p, rq->active);
        set_tsk_need_resched(p);
        p->prio = effective_prio(p);
        p->time_slice = task_timeslice(p);
        p->first_time_slice = 0;

        if (!rq->expired_timestamp)
            rq->expired_timestamp = jiffies;
        if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
            enqueue_task(p, rq->expired);
            if (p->static_prio < rq->best_expired_prio)
                rq->best_expired_prio = p->static_prio;
        } else
            enqueue_task(p, rq->active);
    } else {
        /*
         * Prevent a too long timeslice allowing a task to monopolize
         * the CPU. We do this by splitting up the timeslice into
         * smaller pieces.
         *
         * Note: this does not mean the task's timeslices expire or
         * get lost in any way, they just might be preempted by
         * another task of equal priority. (one with higher
         * priority would have preempted this task already.) We
         * requeue this task to the end of the list on this priority
         * level, which is in essence a round-robin of tasks with
         * equal priority.
         *
         * This only applies to tasks in the interactive
         * delta range with at least TIMESLICE_GRANULARITY to requeue.
         */
        if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
            p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
            (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
            (p->array == rq->active)) {

            requeue_task(p, rq->active);
            set_tsk_need_resched(p);
        }
    }
out_unlock:
    spin_unlock(&rq->lock);
out:
    rebalance_tick(cpu, rq, NOT_IDLE);
}
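Stripped of the run-queue locking, the real-time cases, the interactivity heuristics and the load balancing, the per-tick bookkeeping above boils down to: decrement the running task's slice and, once it reaches zero, refill it and mark the task as needing a reschedule. Below is a minimal, self-contained sketch of just that idea; the struct, the flag constant and refill_timeslice() are illustrative stand-ins, not kernel definitions.

#include <stdio.h>

#define NEED_RESCHED 0x1                  /* stand-in for TIF_NEED_RESCHED */

struct fake_task {                        /* stand-in for task_t, illustration only */
    unsigned int time_slice;              /* ticks left in the current slice */
    unsigned int flags;
};

static unsigned int refill_timeslice(void)
{
    return 5;                             /* fixed refill; the kernel scales it by priority */
}

/* Called once per simulated timer tick for the currently running task. */
static void tick(struct fake_task *p)
{
    if (!--p->time_slice) {               /* same test as in scheduler_tick() */
        p->time_slice = refill_timeslice();
        p->flags |= NEED_RESCHED;         /* what set_tsk_need_resched(p) amounts to */
    }
}

int main(void)
{
    struct fake_task t = { .time_slice = 3, .flags = 0 };

    for (int i = 1; i <= 4; i++) {
        tick(&t);
        printf("tick %d: time_slice=%u need_resched=%d\n",
               i, t.time_slice, !!(t.flags & NEED_RESCHED));
    }
    return 0;
}

Run under gcc, the flag is raised on the third tick, which corresponds to the !--p->time_slice branch of scheduler_tick().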
Note that set_tsk_need_resched(p) is called at several points in this function:
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
    set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
    set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
    set_bit(flag, &ti->flags);
}
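So "requesting" a reschedule is nothing more than setting one bit in the task's thread_info->flags; no switch happens at this point. Whoever runs later only has to test that bit, exactly as preempt_schedule_irq() does further below with test_thread_flag(TIF_NEED_RESCHED). A toy illustration of that set/test pairing follows; the struct and helpers are invented stand-ins, and the real kernel uses the atomic set_bit()/test_bit() family rather than plain C bit operations.

#include <stdio.h>

#define TIF_NEED_RESCHED 3                     /* bit number, chosen for illustration */

struct fake_thread_info {                      /* stand-in for struct thread_info */
    unsigned long flags;
};

static void set_ti_flag(struct fake_thread_info *ti, int flag)
{
    ti->flags |= 1UL << flag;                  /* kernel: atomic set_bit(flag, &ti->flags) */
}

static int test_ti_flag(struct fake_thread_info *ti, int flag)
{
    return !!(ti->flags & (1UL << flag));      /* kernel: test_thread_flag() */
}

int main(void)
{
    struct fake_thread_info ti = { .flags = 0 };

    set_ti_flag(&ti, TIF_NEED_RESCHED);        /* what set_tsk_need_resched() boils down to */
    printf("need_resched? %d\n", test_ti_flag(&ti, TIF_NEED_RESCHED));
    return 0;
}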
Finally, at the point where interrupt handling ends, ret_from_intr (in entry.S) is reached:
ret_from_intr:
    GET_THREAD_INFO(%ebp)
    movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
    movb CS(%esp), %al
    testl $(VM_MASK | 3), %eax
    jz resume_kernel

ENTRY(resume_userspace)
    cli                             # make sure we don't miss an interrupt
                                    # setting need_resched or sigpending
                                    # between sampling and the iret
    movl TI_flags(%ebp), %ecx
    andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                    # int/exception return?
    jne work_pending
    jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
    cli
    cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
    jnz restore_nocheck
need_resched:
    movl TI_flags(%ebp), %ecx       # need_resched set ?
    testb $_TIF_NEED_RESCHED, %cl
    jz restore_all
    testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
    jz restore_all
    call preempt_schedule_irq
    jmp need_resched
#endif
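For readers less fluent in x86 assembly, the resume_kernel path can be paraphrased in C roughly as follows. This is only a sketch of the control flow: every type, constant and helper here is an invented stand-in, and the targets restore_all/restore_nocheck are reduced to plain returns.

#include <stdio.h>
#include <stdbool.h>

#define _TIF_NEED_RESCHED (1u << 3)        /* bit value chosen for illustration */

struct fake_thread_info {                  /* stand-in for struct thread_info */
    int          preempt_count;
    unsigned int flags;
};

static void preempt_schedule_irq_stub(struct fake_thread_info *ti)
{
    printf("call preempt_schedule_irq -> schedule()\n");
    ti->flags &= ~_TIF_NEED_RESCHED;       /* pretend the reschedule has happened */
}

/* irqs_were_on: was IF set in the interrupted context's saved EFLAGS? */
static void resume_kernel_sketch(struct fake_thread_info *ti, bool irqs_were_on)
{
    /* cli: interrupts stay off from here until the final iret */
    if (ti->preempt_count != 0)            /* jnz restore_nocheck */
        return;

    for (;;) {                             /* the need_resched: label */
        if (!(ti->flags & _TIF_NEED_RESCHED))
            return;                        /* jz restore_all */
        if (!irqs_were_on)
            return;                        /* exception path with irqs off: jz restore_all */
        preempt_schedule_irq_stub(ti);     /* call preempt_schedule_irq */
    }                                      /* jmp need_resched */
}

int main(void)
{
    struct fake_thread_info ti = { .preempt_count = 0, .flags = _TIF_NEED_RESCHED };
    resume_kernel_sketch(&ti, true);
    return 0;
}

In words: kernel preemption only happens when preemption is not disabled (preempt_count is zero), somebody has set TIF_NEED_RESCHED, and the interrupted kernel code was running with interrupts enabled.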
As you can see, the process switch is ultimately performed via call preempt_schedule_irq:
asmlinkage void __sched preempt_schedule_irq(void)
{
    struct thread_info *ti = current_thread_info();
#ifdef CONFIG_PREEMPT_BKL
    struct task_struct *task = current;
    int saved_lock_depth;
#endif
    /* Catch callers which need to be fixed */
    BUG_ON(ti->preempt_count || !irqs_disabled());

need_resched:
    add_preempt_count(PREEMPT_ACTIVE);
    /*
     * We keep the big kernel semaphore locked, but we
     * clear ->lock_depth so that schedule() doesn't
     * auto-release the semaphore:
     */
#ifdef CONFIG_PREEMPT_BKL
    saved_lock_depth = task->lock_depth;
    task->lock_depth = -1;
#endif
    local_irq_enable();
    schedule();
    local_irq_disable();
#ifdef CONFIG_PREEMPT_BKL
    task->lock_depth = saved_lock_depth;
#endif
    sub_preempt_count(PREEMPT_ACTIVE);

    /* we could miss a preemption opportunity between schedule and now */
    barrier();
    if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
        goto need_resched;
}
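One detail worth noting is the add_preempt_count(PREEMPT_ACTIVE) / sub_preempt_count(PREEMPT_ACTIVE) pair around schedule(). PREEMPT_ACTIVE tells schedule() that the task is being preempted rather than going to sleep voluntarily, so even if the task has already set its state to something other than TASK_RUNNING it must stay on the run queue instead of being deactivated. The sketch below only illustrates that decision; the struct, the flag value and keep_on_runqueue() are invented stand-ins, not kernel code.

#include <stdio.h>
#include <stdbool.h>

#define TASK_RUNNING   0
#define PREEMPT_ACTIVE 0x10000000          /* value chosen for illustration */

struct fake_task {                         /* stand-in for task_t */
    long         state;                    /* 0 = runnable, nonzero = wants to sleep */
    unsigned int preempt_count;
};

/* The choice schedule() has to make about the task it switches away from. */
static bool keep_on_runqueue(const struct fake_task *p)
{
    /* A task whose state is not TASK_RUNNING is normally dequeued because it
     * intends to sleep.  If it was merely preempted on the interrupt return
     * path, PREEMPT_ACTIVE is set and it must remain runnable, otherwise it
     * could be left off the run queue without ever having blocked on anything. */
    if (p->state != TASK_RUNNING && !(p->preempt_count & PREEMPT_ACTIVE))
        return false;                      /* deactivate: take it off the run queue */
    return true;                           /* leave it queued; it was only preempted */
}

int main(void)
{
    struct fake_task preempted = { .state = 1, .preempt_count = PREEMPT_ACTIVE };
    struct fake_task sleeping  = { .state = 1, .preempt_count = 0 };

    printf("preempted task stays queued: %d\n", keep_on_runqueue(&preempted));
    printf("sleeping task stays queued:  %d\n", keep_on_runqueue(&sleeping));
    return 0;
}

The need_resched: loop at the end of preempt_schedule_irq() serves a related purpose: as the comment above barrier() says, a new reschedule request may arrive between schedule() returning and the final return from the interrupt, so TIF_NEED_RESCHED is tested once more and schedule() is re-entered if necessary.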