static inline void
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
        struct mm_struct *mm, *oldmm;
        /*
         * Before the task switch, prepare_task_switch() invokes the
         * prepare_arch_switch() hook, which each architecture may define
         * (it defaults to a no-op). This enables the kernel to execute
         * architecture-specific code to prepare for the switch.
         */
        prepare_task_switch(rq, prev, next);
        trace_sched_switch(rq, prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
         * For paravirt, this is coupled with an exit in switch_to to
         * combine the page table reload and the switch backend into
         * one hypercall.
         */
        arch_start_context_switch(prev);
        /*
         * Kernel threads do not have a userspace address space of their
         * own, so their mm is NULL; they borrow the active_mm of the
         * previous task instead.
         */
        if (likely(!mm)) {
                next->active_mm = oldmm;
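                /*
                 * Take a reference on the borrowed mm (mm_count counts
                 * users of the mm_struct itself) so it cannot be freed
                 * while the kernel thread is using it; the reference is
                 * dropped again via mmdrop() in finish_task_switch().
                 */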
                atomic_inc(&oldmm->mm_count);
                /*
                 * Notify the underlying architecture that exchanging the
                 * userspace portion of the virtual address space is not
                 * required. This speeds up the context switch and is
                 * known as the lazy-TLB technique.
                 */
                enter_lazy_tlb(oldmm, next);
        } else
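                /*
                 * Otherwise the next task has an mm of its own: switch
                 * the userspace address space (reload the page tables
                 * and, depending on the architecture, TLB state).
                 */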
                switch_mm(oldmm, mm, next);
        /*
         * If the previous task is a kernel thread, its active_mm pointer
         * must be reset to NULL to disconnect it from the borrowed
         * address space.
         */
        if (likely(!prev->mm)) {
                prev->active_mm = NULL;
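                /*
                 * Stash the borrowed mm in rq->prev_mm so that
                 * finish_task_switch(), which runs after the switch
                 * completes, can drop the reference with mmdrop().
                 */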
                rq->prev_mm = oldmm;
        }
        /*
         * The runqueue lock will be released by the next task (which is
         * an invalid locking op, but in the case of the scheduler it is
         * an obvious special case), so we do an early lockdep release
         * here:
         */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
        /* Here we just switch the register state and the stack. */
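        /*
         * Note that switch_to() takes prev twice: after the switch, the
         * kernel resumes on next's stack, where the local variable prev
         * refers to whatever task next switched away from, possibly long
         * ago. The third argument lets the macro write the actual
         * predecessor task back into prev so that the code below
         * operates on the right task.
         */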
        switch_to(prev, next, prev);
        /*
         * The barrier statement is a directive for the compiler that
         * ensures the order in which the switch_to and
         * finish_task_switch statements are executed is not changed by
         * any unfortunate optimizations.
         */
        barrier();
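        /*
         * With GCC, the kernel defines barrier() as
         * __asm__ __volatile__("" : : : "memory"), a pure compiler-level
         * memory clobber that emits no machine instructions.
         */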
        /*
         * this_rq must be evaluated again because prev may have moved
         * CPUs since it called schedule(), thus the 'rq' on its stack
         * frame will be invalid.
         */
        /* Performs some cleanup and allows the lock to be released correctly. */
        finish_task_switch(this_rq(), prev);
}
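
For context, here is a condensed, illustrative sketch of the receiving side, finish_task_switch(), abridged to the parts relevant above (the full function does more; elided details are marked with /* ... */). It picks up the mm stashed in rq->prev_mm and drops the reference taken with atomic_inc(&oldmm->mm_count):

static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
        struct mm_struct *mm = rq->prev_mm;

        rq->prev_mm = NULL;
        /* ... */
        finish_arch_switch(prev);
        /* Releases the runqueue lock that the previous task acquired. */
        finish_lock_switch(rq, prev);
        /* ... */
        if (mm)
                mmdrop(mm);     /* pairs with atomic_inc(&oldmm->mm_count) */
        /* ... */
}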