Category: LINUX

2013-11-14 17:59:40

2. Process Switching in the ARM Architecture

In the articles that follow, a good portion of the content will deal with how the kernel schedules and manages processes. Studying this area mostly means working with data structures such as task_struct, the runqueue, and the scheduling entity (sched_entity). There are already plenty of articles online about Linux process scheduling, and most of them describe the scheduling algorithms in detail. Put plainly, a scheduling algorithm answers the question of when the kernel should replace the currently running process, and with which process. How the actual switch between the two processes is carried out, however, is covered far less often. So before we move on to the more advanced material, we need to master this most fundamental mechanism first. The process-switch procedure is described below.

 
/*
* context_switch - switch to the new MM and the new
* thread's register state.
*/ 
static inline void 
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
    struct mm_struct *mm, *oldmm;
 
    prepare_task_switch(rq, prev, next);
    trace_sched_switch(rq, prev, next);
    mm = next->mm;
    oldmm = prev->active_mm;
    /*
     * For paravirt, this is coupled with an exit in switch_to to
     * combine the page table reload and the switch backend into
     * one hypercall.
     */ 
    arch_enter_lazy_cpu_mode();
 
    if (unlikely(!mm)) {
        /* next is a kernel thread: borrow prev's address space (lazy TLB) */
        next->active_mm = oldmm;
        atomic_inc(&oldmm->mm_count);
        enter_lazy_tlb(oldmm, next);
    } else
        switch_mm(oldmm, mm, next);

    if (unlikely(!prev->mm)) {
        /* prev was a kernel thread: its borrowed mm is dropped in finish_task_switch() */
        prev->active_mm = NULL;
        rq->prev_mm = oldmm;
    }
    /*
     * Since the runqueue lock will be released by the next
     * task (which is an invalid locking op but in the case
     * of the scheduler it's an obvious special-case), so we
     * do an early lockdep release here:
     */ 
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif 
 
    /* Here we just switch the register state and the stack. */ 
    switch_to(prev, next, prev);
 
    barrier();
    /*
     * this_rq must be evaluated again because prev may have moved
     * CPUs since it called schedule(), thus the 'rq' on its stack
     * frame will be invalid.
     */ 
    finish_task_switch(this_rq(), prev);
}
 

context_switch() is the function that performs the actual process switch during scheduling: prev is the process being switched out, and next is the process being switched in.

The code first checks whether the process being switched in has a user address space. If mm is NULL, the process that next points to is a kernel thread, which has no user address space at all. Such a process will never access user-space addresses, so there is no need to replace the MMU's page directory table; on ARM, enter_lazy_tlb() is an empty implementation. If next is an ordinary user process, switch_mm() is called to replace the MMU's page directory table.
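On ARM, switch_mm() reaches the processor-specific routine through the cpu_switch_mm() macro, taking the new page directory from mm_struct->pgd. The following is only a simplified sketch of that path (modelled on arch/arm/include/asm/mmu_context.h of 2.6-era kernels; the SMP and ASID/context bookkeeping is omitted):

/*
 * Simplified sketch: how switch_mm() reaches the cp15 code below.
 * Not the verbatim kernel source; SMP/ASID handling omitted.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
#ifdef CONFIG_MMU
    if (prev != next)
        /*
         * cpu_switch_mm(pgd, mm) invokes the per-CPU switch_mm
         * routine -- cpu_arm920_switch_mm() below on an ARM920T --
         * passing the physical address of the new page directory.
         */
        cpu_switch_mm(next->pgd, next);
#endif
}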

The code below performs the actual replacement of the MMU page directory table. It is quite simple: it writes the base address of the new page directory table into the cp15 coprocessor. Each process's page directory base address is kept in the pgd field of its mm_struct.

/*
* cpu_arm920_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/ 
    .align    5 
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
    mov    ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr    p15, 0, ip, c7, c6, 0        @ invalidate D cache
#else 
@ && 'Clean & Invalidate whole DCache' 
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
 
    mov    r1, #(CACHE_DSEGMENTS - 1) << 5    @ 8 segments
1:    orr    r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:    mcr    p15, 0, r3, c7, c14, 2        @ clean & invalidate D index
    subs    r3, r3, #1 << 26 
    bcs    2b                @ entries 63 to 0 
    subs    r1, r1, #1 << 5 
    bcs    1b                @ segments 7 to 0 
#endif 
    mcr    p15, 0, ip, c7, c5, 0        @ invalidate I cache
    mcr    p15, 0, ip, c7, c10, 4        @ drain WB
    mcr    p15, 0, r0, c2, c0, 0        @ load page table pointer
    mcr    p15, 0, ip, c8, c7, 0        @ invalidate I & D TLBs
#endif 
    mov    pc, lr

Once the page directory table has been replaced, control returns to context_switch(); what remains is to complete the process switch itself, i.e. to transfer execution to the next process's flow of control.

This switch is also fairly straightforward: the current CPU context is saved first, the cp15 domain register is then set, and finally the CPU context of the next process is restored, at which point we are running in next. (The layout of the saved registers is sketched after the assembly below.)

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
        struct thread_info *, struct thread_info *);

#define switch_to(prev, next, last) \
do { \
    last = __switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
} while (0)

ENTRY(__switch_to)
    add     ip, r1, #TI_CPU_SAVE          @ ip = &prev thread_info->cpu_context
    ldr     r3, [r2, #TI_TP_VALUE]        @ r3 = next's TLS value
    stmia   ip!, {r4 - sl, fp, sp, lr}    @ Store most regs on stack
#ifdef CONFIG_MMU
    ldr     r6, [r2, #TI_CPU_DOMAIN]      @ r6 = next's cp15 domain value
#endif
#ifdef CONFIG_MMU
    mcr     p15, 0, r6, c3, c0, 0         @ Set domain register
#endif
    mov     r5, r0                        @ keep prev task_struct across the call
    add     r4, r2, #TI_CPU_SAVE          @ r4 = &next thread_info->cpu_context
    ldr     r0, =thread_notify_head
    mov     r1, #THREAD_NOTIFY_SWITCH
    bl      atomic_notifier_call_chain    @ notify THREAD_NOTIFY_SWITCH
    mov     r0, r5                        @ return value: prev task_struct
    ldmia   r4, {r4 - sl, fp, sp, pc}     @ Load all regs saved previously
ENDPROC(__switch_to)
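For reference, TI_CPU_SAVE is the offset of the cpu_context field inside struct thread_info, so the stmia/ldmia above save and restore the registers into a per-task structure roughly like the following (from arch/arm/include/asm/thread_info.h; the exact layout may differ slightly between kernel versions):

struct cpu_context_save {
    __u32   r4;
    __u32   r5;
    __u32   r6;
    __u32   r7;
    __u32   r8;
    __u32   r9;
    __u32   sl;         /* r10 */
    __u32   fp;         /* r11, frame pointer */
    __u32   sp;         /* kernel stack pointer */
    __u32   pc;         /* resume address (lr saved by the stmia above) */
    __u32   extra[2];   /* CPU-specific, e.g. XScale 'acc' register */
};

When the final ldmia executes, sp and pc are reloaded from next's cpu_context_save, so execution continues on next's kernel stack at the point where next was itself suspended inside __switch_to(). The mov r0, r5 just before it makes prev the return value of __switch_to(), which the switch_to() macro stores into last.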
