Category: LINUX

2013-07-07 12:14:44

There are many articles about CFS online, covering different kernel versions and with differing interpretations. Here I trace the source code in a question-driven way and write down my own understanding of CFS. Some questions I still have not fully figured out, and I welcome anyone interested in the kernel to discuss and learn together. The source version is Linux 2.6.34, the one that accompanies LKD3 (Linux Kernel Development, 3rd edition).

Background:

(1) The Linux scheduler classes mainly implement two families of scheduling algorithms: real-time scheduling and the Completely Fair Scheduler (CFS). The real-time policies SCHED_FIFO and SCHED_RR run strictly by priority and are generally not preempted by normal tasks; normal processes only run once the runnable real-time processes are done. Most ordinary processes are scheduled by CFS.

(2) When scheduling happens

① At process state transitions: process exit, process sleep;

② When the current process's "timeslice" is used up;

③ When the processor is yielded voluntarily: user space calls sleep(), or kernel code calls schedule();

④ On return from an interrupt, system call, or exception;

(3) Every process's task_struct contains a struct sched_entity se member; this is the scheduler entity structure. The process scheduling algorithm essentially manages the se of every process.


struct task_struct {
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags;    /* per process flags, defined below */
    unsigned int ptrace;

    int lock_depth;        /* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
    int oncpu;
#endif
#endif

    int prio, static_prio, normal_prio;
    unsigned int rt_priority;
    const struct sched_class *sched_class;
    struct sched_entity se; /* the process's scheduling entity */
    struct sched_rt_entity rt;
    …
}


CFS is based on a simple idea: every task should receive a fair share of the processor. Ideally, in a system with n runnable processes, each process receives 1/n of the processor time, and all processes end up with the same vruntime.

CFS abandons the notion of a fixed timeslice entirely; instead each process is assigned a proportion of the processor.

The time a process is allocated within one scheduling period (the analogue of a traditional "timeslice") depends on three factors: the number of runnable processes, the process's priority (weight), and the length of the scheduling period.
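As a rough sketch of that relationship (the real computation is done by __sched_period() and sched_slice() in kernel/sched_fair.c; the 20 ms period and the weights below are illustrative assumptions, not kernel defaults):

    /*
     * Toy model: each runnable task's slice is its weight's share of
     * one scheduling period.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long period_ns = 20000000UL;           /* assumed 20 ms period */
        unsigned long weight[]  = { 1024, 1024, 820 };  /* two nice-0 tasks, one nice-1 task */
        unsigned long total = 0;
        int i;

        for (i = 0; i < 3; i++)
            total += weight[i];

        for (i = 0; i < 3; i++)                         /* slice_i = period * w_i / sum(w) */
            printf("task %d: ~%lu ns of the period\n", i, period_ns * weight[i] / total);

        return 0;
    }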

For more background on process scheduling, see my previous post: http://blog.chinaunix.net/uid-24708340-id-3778555.html

1. Understanding CFS starts with understanding what vruntime means

Simply put, vruntime is the process's accumulated run time, but weighted by priority and load rather than measured in raw wall-clock time; reading it literally as "virtual runtime" is quite accurate.
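In other words (a minimal model, ignoring the kernel's fixed-point arithmetic, which is covered below with __update_curr() and calc_delta_fair()): real run time is scaled by the nice-0 weight over the task's own weight before being added to vruntime, so heavier (higher-priority) tasks accumulate vruntime more slowly. NICE_0_LOAD is 1024 in this kernel version.

    #define NICE_0_LOAD 1024UL

    /* rough model of the vruntime update; the kernel avoids the division */
    static unsigned long long vruntime_delta(unsigned long delta_exec_ns,
                                             unsigned long weight)
    {
        return (unsigned long long)delta_exec_ns * NICE_0_LOAD / weight;
    }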

Each process's scheduling entity se stores that process's virtual runtime.


struct sched_entity {
    struct load_weight    load;        /* for load-balancing */
    struct rb_node        run_node;
    struct list_head      group_node;
    unsigned int          on_rq;

    u64            exec_start;
    u64            sum_exec_runtime;
    u64            vruntime; /* virtual runtime */
    u64            prev_sum_exec_runtime;

}

The scheduling methods associated with these processes (the fair scheduling class) are:


static const struct sched_class fair_sched_class = {
    .next            = &idle_sched_class,
    .enqueue_task        = enqueue_task_fair,
    .dequeue_task        = dequeue_task_fair,
    .yield_task        = yield_task_fair,

    .check_preempt_curr    = check_preempt_wakeup,

    .pick_next_task        = pick_next_task_fair,
    .put_prev_task        = put_prev_task_fair,

#ifdef CONFIG_SMP
    .select_task_rq        = select_task_rq_fair,

    .rq_online        = rq_online_fair,
    .rq_offline        = rq_offline_fair,

    .task_waking        = task_waking_fair,
#endif

    .set_curr_task = set_curr_task_fair,
    .task_tick        = task_tick_fair,
    .task_fork        = task_fork_fair,

    .prio_changed        = prio_changed_fair,
    .switched_to        = switched_to_fair,

    .get_rr_interval    = get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
    .task_move_group    = task_move_group_fair,
#endif
};

2. How is the value of vruntime updated?

On each timer interrupt the kernel calls, in order, tick_periodic() -> update_process_times() -> scheduler_tick().


void scheduler_tick(void)
{

    raw_spin_lock(&rq->lock);
    update_rq_clock(rq);
    update_cpu_load(rq);
    curr->sched_class->task_tick(rq, curr, 0); /* run the scheduler-class tick; this updates the task's vruntime */
    raw_spin_unlock(&rq->lock);

}

task_tick_fair() then calls entity_tick():

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
    update_curr(cfs_rq);

    if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
        check_preempt_tick(cfs_rq, curr); /* check whether the current task should be preempted */
}

Two important functions are analyzed here: update_curr() and check_preempt_tick().


static void update_curr(struct cfs_rq *cfs_rq)
{
    struct sched_entity *curr = cfs_rq->curr;
    u64 now = rq_of(cfs_rq)->clock;
    unsigned long delta_exec;

    if (unlikely(!curr))
        return;

    /* delta_exec is the real time the task has run since exec_start was last updated */
    delta_exec = (unsigned long)(now - curr->exec_start);
    if (!delta_exec)
        return;

    __update_curr(cfs_rq, curr, delta_exec);
    curr->exec_start = now; /* run time accounted; reset the start-of-execution timestamp */

    if (entity_is_task(curr)) {
        struct task_struct *curtask = task_of(curr);

        trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
        cpuacct_charge(curtask, delta_exec);
        account_group_exec_runtime(curtask, delta_exec);
    }
}

The function of main interest is __update_curr():


static inline void __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, unsigned long delta_exec)
{
    unsigned long delta_exec_weighted;

    schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

    curr->sum_exec_runtime += delta_exec; /* accumulate real run time */
    schedstat_add(cfs_rq, exec_clock, delta_exec);
    delta_exec_weighted = calc_delta_fair(delta_exec, curr); /* weight delta_exec */

    curr->vruntime += delta_exec_weighted; /* add the weighted time to vruntime */
    update_min_vruntime(cfs_rq); /* update cfs_rq->min_vruntime (the smallest vruntime among all entities) */
}

Now look at how the weighting function calc_delta_fair() is implemented:


/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
    if (unlikely(se->load.weight != NICE_0_LOAD))
        delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

    return delta;
}

If the current process has nice value 0 (weight NICE_0_LOAD), the real run time is returned unchanged; for every other nice value the weighting increases or decreases the time relative to nice 0.
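A quick worked example of that weighting, using values from the prio_to_weight[] array shown in section 7 below and an assumed 10 ms of real run time:

    nice  0 task (weight 1024): 10 ms * 1024 / 1024 = 10.0 ms of vruntime
    nice  5 task (weight  335): 10 ms * 1024 /  335 ≈ 30.6 ms of vruntime
    nice -5 task (weight 3121): 10 ms * 1024 / 3121 ≈  3.3 ms of vruntime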


/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
        struct load_weight *lw)
{
    u64 tmp;

    if (!lw->inv_weight) {
        if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
            lw->inv_weight = 1;
        else
            lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
                / (lw->weight+1); /* I haven't fully figured out this formula */
    }

    tmp = (u64)delta_exec * weight;
    /*
     * Check whether we'd overflow the 64-bit multiplication:
     */
    if (unlikely(tmp > WMULT_CONST))
        tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
            WMULT_SHIFT/2);
    else
        tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); /* SRR rounds to nearest */

    return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}

When nice != 0, the computation follows the formula delta *= weight / lw, where weight = 1024 is the nice-0 weight and lw is the current process's load weight. The conversion from nice value to lw is covered later. I haven't fully worked out the inv_weight formula above, but in short this function weights the real run time into the virtual run time used by the scheduler, which then updates vruntime.
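For what it's worth, inv_weight is a precomputed fixed-point reciprocal: WMULT_CONST is (roughly) 2^32 and WMULT_SHIFT is 32, so inv_weight ≈ 2^32 / weight, and multiplying by inv_weight then shifting right by 32 bits approximates dividing by weight without a runtime division. A small stand-alone sketch of the idea (it follows the non-overflow branch only and ignores the SRR rounding; the task weights are assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define WMULT_CONST  (1ULL << 32)   /* 2^32, the 64-bit definition in this kernel */
    #define WMULT_SHIFT  32

    int main(void)
    {
        uint64_t delta_exec = 1000000;   /* 1 ms in ns, assumed */
        uint64_t weight     = 1024;      /* NICE_0_LOAD */
        uint64_t lw_weight  = 335;       /* a nice-5 task, from prio_to_weight[] */

        /* same fallback formula calc_delta_mine() uses to fill lw->inv_weight */
        uint64_t inv_weight = 1 + (WMULT_CONST - lw_weight / 2) / (lw_weight + 1);

        uint64_t approx = (delta_exec * weight * inv_weight) >> WMULT_SHIFT;
        uint64_t exact  = delta_exec * weight / lw_weight;

        /* the two results agree to within about 0.3% here */
        printf("approx=%llu exact=%llu\n",
               (unsigned long long)approx, (unsigned long long)exact);
        return 0;
    }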

After vruntime has been updated, the kernel checks whether a reschedule is needed.


Back in entity_tick():

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
    update_curr(cfs_rq);

    if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
        check_preempt_tick(cfs_rq, curr); /* check whether the current task should be preempted */
}

After the cfs_rq has been updated, check_preempt_tick() checks whether the current process has used up its "timeslice":


/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
    unsigned long ideal_runtime, delta_exec;

    ideal_runtime = sched_slice(cfs_rq, curr); /* ideal_runtime is the slice of processor time this task is entitled to */
    delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; /* real time this task has run since it was last picked */
    if (delta_exec > ideal_runtime) { /* it ran longer than its allotted slice: flag a reschedule */
        resched_task(rq_of(cfs_rq)->curr);
        /*
         * The current task ran long enough, ensure it doesn't get
         * re-elected due to buddy favours.
         */
        clear_buddies(cfs_rq, curr);
        return;
    }

    /*
     * Ensure that a task that missed wakeup preemption by a
     * narrow margin doesn't have to wait for a full slice.
     * This also mitigates buddy induced latencies under load.
     */
    if (!sched_feat(WAKEUP_PREEMPT))
        return;

    if (delta_exec < sysctl_sched_min_granularity)
        return;

    if (cfs_rq->nr_running > 1) {
        struct sched_entity *se = __pick_next_entity(cfs_rq);
        s64 delta = curr->vruntime - se->vruntime;

        if (delta > ideal_runtime)
            resched_task(rq_of(cfs_rq)->curr);
    }
}

When the process has run longer than its allotted "timeslice", resched_task(rq_of(cfs_rq)->curr) sets the reschedule flag; otherwise it keeps running. On the interrupt-return path the kernel checks this flag and, if it is set, calls schedule() to pick a new process to preempt the current one.
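resched_task() itself does little more than set the per-task TIF_NEED_RESCHED flag; an abridged sketch (the SMP cross-CPU notification is omitted):

    static void resched_task(struct task_struct *p)
    {
        if (test_tsk_need_resched(p))
            return;

        set_tsk_need_resched(p);   /* reschedule at the next opportunity, e.g. interrupt return */
        /* on SMP, an IPI may also be sent so the target CPU notices the flag promptly */
    }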


3. How the next runnable process is chosen

CFS picks the process with the smallest vruntime as the next one to run. The scheduling entities are organized in a red-black tree keyed by vruntime, so CFS only needs to pick the leftmost node of the tree. In fact CFS caches the leftmost node, so it can select the left_most leaf directly.
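Picking it is essentially just a read of that cached pointer; in this kernel version __pick_next_entity() looks roughly like this:

    static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
    {
        struct rb_node *left = cfs_rq->rb_leftmost;   /* cached leftmost node: smallest vruntime */

        if (!left)
            return NULL;

        return rb_entry(left, struct sched_entity, run_node);
    }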

Following the code above to the end of the timer-tick interrupt: if the "ideal_runtime" has been used up, schedule() is called to pick the new process and complete the switch.


asmlinkage void __sched schedule(void)
{
    if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
        if (unlikely(signal_pending_state(prev->state, prev)))
            prev->state = TASK_RUNNING;
        else
            deactivate_task(rq, prev, 1); /* no longer runnable: remove it from the runqueue */
        switch_count = &prev->nvcsw;
    }

    pre_schedule(rq, prev);

    if (unlikely(!rq->nr_running))
        idle_balance(cpu, rq);

    put_prev_task(rq, prev); /* deal with the previous task */
    next = pick_next_task(rq); /* pick the next task */

    context_switch(rq, prev, next); /* unlocks the rq; performs the actual switch */

}

If the process's state is no longer runnable, it is removed from the runqueue; if it is still runnable, it is re-inserted into the tree by the code below.

put_prev_task() calls put_prev_task_fair(), which in turn calls put_prev_entity():


static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
    /*
     * If still on the runqueue then deactivate_task()
     * was not called and update_curr() has to be done:
     */
    if (prev->on_rq)
        update_curr(cfs_rq);

    check_spread(cfs_rq, prev);
    if (prev->on_rq) {
        update_stats_wait_start(cfs_rq, prev);
        /* Put 'current' back into the tree. */
        __enqueue_entity(cfs_rq, prev);
    }
    cfs_rq->curr = NULL;
}

__enqueue_entity(cfs_rq, prev) re-inserts the previous process into the red-black tree (note that the currently running process is not kept in the tree).
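The insertion walks the tree comparing vruntime-based keys and refreshes the cached leftmost pointer. A condensed sketch of __enqueue_entity() in this version (statistics calls omitted):

    static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
    {
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);   /* essentially se->vruntime - cfs_rq->min_vruntime */
        int leftmost = 1;

        /* find the insertion point, ordered by (relative) vruntime */
        while (*link) {
            parent = *link;
            entry = rb_entry(parent, struct sched_entity, run_node);
            if (key < entity_key(cfs_rq, entry)) {
                link = &parent->rb_left;
            } else {
                link = &parent->rb_right;
                leftmost = 0;               /* went right at least once: not the new minimum */
            }
        }

        /* keep the cached leftmost pointer up to date */
        if (leftmost)
            cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
    }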

pick_next_task() in turn calls pick_next_task_fair():


static struct task_struct *pick_next_task_fair(struct rq *rq)
{
    struct task_struct *p;
    struct cfs_rq *cfs_rq = &rq->cfs;
    struct sched_entity *se;

    if (!cfs_rq->nr_running)
        return NULL;

    do {
        se = pick_next_entity(cfs_rq); /* pick the next runnable entity */
        set_next_entity(cfs_rq, se); /* remove the chosen entity (the leftmost node) from the tree and update the tree */
        cfs_rq = group_cfs_rq(se);
    } while (cfs_rq);

    p = task_of(se);
    hrtick_start_fair(rq, p);

    return p;
}

set_next_entity() calls __dequeue_entity(cfs_rq, se) to remove the chosen next process, i.e. the leftmost node, from the red-black tree.

Finally, context_switch() performs the actual process switch.

 

4. When is the rbtree updated?

① When the previous process has used up its ideal_runtime but is still runnable, it is re-inserted into the red-black tree;

② When the next process is picked, it is removed from the rbtree;

③ When a new process is created;

④ When a sleeping process is woken up and becomes runnable;

⑤ When a process's priority is adjusted, the rbtree is also updated;

 

5. How a new process is added to the red-black tree

Creating a new process involves a long series of steps; here we only care about the parts related to the red-black tree.

Linux creates processes via the fork, clone or vfork system calls, all of which end up in do_fork(). If CLONE_STOPPED is not set, do_fork() executes two functions relevant to the red-black tree: copy_process() and wake_up_new_task().

(1) copy_process() -> sched_fork() -> task_fork() (i.e. task_fork_fair(), which calls place_entity()):


static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
    u64 vruntime = cfs_rq->min_vruntime; /* start from the cfs_rq's min_vruntime */

    /*
     * The 'current' period is already promised to the current tasks,
     * however the extra weight of the new task will slow them down a
     * little, place the new task so that it fits in the slot that
     * stays open at the end.
     */
    if (initial && sched_feat(START_DEBIT))
        vruntime += sched_vslice(cfs_rq, se); /* add one virtual "slice" of a scheduling period */

    /* sleeps up to a single latency don't count. */
    if (!initial && sched_feat(FAIR_SLEEPERS)) {
        unsigned long thresh = sysctl_sched_latency;

        /*
         * Convert the sleeper threshold into virtual time.
         * SCHED_IDLE is a special sub-class. We care about
         * fairness only relative to other SCHED_IDLE tasks,
         * all of which have the same weight.
         */
        if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
                 task_of(se)->policy != SCHED_IDLE))
            thresh = calc_delta_fair(thresh, se);

        /*
         * Halve their sleep time's effect, to allow
         * for a gentler effect of sleepers:
         */
        if (sched_feat(GENTLE_FAIR_SLEEPERS))
            thresh >>= 1;

        vruntime -= thresh;
    }

    /* ensure we never gain time by being placed backwards. */
    vruntime = max_vruntime(se->vruntime, vruntime);

    se->vruntime = vruntime;
}

This computes the new process's vruntime: min_vruntime plus one virtual "slice", as if it had just finished running a slice, so that a newly created process does not immediately preempt the CPU.
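A quick numeric illustration (the figures are assumed, not taken from a real trace):

    cfs_rq->min_vruntime = 1000.0 ms
    sched_vslice(...)    =    4.0 ms   (the new task's slice, converted to virtual time)
    se->vruntime         = 1004.0 ms   (placed toward the right side of the tree)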

(2) wake_up_new_task() is called:


void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{

    rq = task_rq_lock(p, &flags);
    update_rq_clock(rq);
    activate_task(rq, p, 0); /* activate the new process: enqueue it into the red-black tree */
    check_preempt_curr(rq, p, WF_FORK); /* check whether the new task should preempt the current one */
    …
}

It updates the runqueue clock and activates the newly created process; activate_task() ends up calling enqueue_task():


static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
    if (wakeup)
        p->se.start_runtime = p->se.sum_exec_runtime;

    sched_info_queued(p);
    p->sched_class->enqueue_task(rq, p, wakeup, head);
    p->se.on_rq = 1;
}

This adds the newly created process to the rbtree.

6. Waking up a process: try_to_wake_up() -> activate_task() -> enqueue_task_fair() -> enqueue_entity()

Note that enqueue_entity() calls place_entity() to compensate the process's vruntime. Look at place_entity(cfs_rq, se, 0) again:


static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
    u64 vruntime = cfs_rq->min_vruntime; /* start from the cfs_rq's min_vruntime */

    /*
     * The 'current' period is already promised to the current tasks,
     * however the extra weight of the new task will slow them down a
     * little, place the new task so that it fits in the slot that
     * stays open at the end.
     */
    if (initial && sched_feat(START_DEBIT))
        vruntime += sched_vslice(cfs_rq, se); /* one virtual "slice" of a scheduling period */

    /* sleeps up to a single latency don't count. */
    if (!initial && sched_feat(FAIR_SLEEPERS)) {
        unsigned long thresh = sysctl_sched_latency;

        /*
         * Convert the sleeper threshold into virtual time.
         * SCHED_IDLE is a special sub-class. We care about
         * fairness only relative to other SCHED_IDLE tasks,
         * all of which have the same weight.
         */
        if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
                 task_of(se)->policy != SCHED_IDLE))
            thresh = calc_delta_fair(thresh, se);

        /*
         * Halve their sleep time's effect, to allow
         * for a gentler effect of sleepers:
         */
        if (sched_feat(GENTLE_FAIR_SLEEPERS))
            thresh >>= 1;

        vruntime -= thresh; /* give a woken sleeper some vruntime compensation */
    }

    /* ensure we never gain time by being placed backwards. */
    vruntime = max_vruntime(se->vruntime, vruntime); /* make sure a task cannot gain time just by sleeping */

    se->vruntime = vruntime;
}

With initial = 1 (a new process), vruntime = cfs min_vruntime + one slice, which places the process toward the right side of the red-black tree.

With initial = 0 (a woken process), a thresh is subtracted from vruntime. This thresh comes from the scheduling latency sysctl_sched_latency, weighted into virtual time. It compensates the sleeping process: on wakeup it gets a smaller vruntime, so it can preempt the CPU sooner (giving fast response to I/O-bound processes).

Note the comment /* ensure we never gain time by being placed backwards. */

This design compensates processes that have slept for a long time: they can preempt quickly, yet a too-small vruntime that would let them monopolize the CPU for a long time is avoided. Some processes sleep only briefly, so on wakeup their own vruntime is still larger than min_vruntime; to keep a process from gaining extra run time through sleeping, the final vruntime is the larger of the compensated value and the process's own vruntime. This shows that although CFS no longer distinguishes I/O-bound from CPU-bound processes, the CFS model naturally gives I/O-bound processes fast response.
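A rough numeric example, assuming sysctl_sched_latency = 6 ms, GENTLE_FAIR_SLEEPERS enabled (so thresh is halved to 3 ms), and a nice-0 task so no further weighting applies:

    cfs_rq->min_vruntime  = 1000.0 ms
    compensated placement = 1000.0 - 3.0 = 997.0 ms

    long sleeper,  se->vruntime =  200.0 ms -> max(200.0, 997.0)  =  997.0 ms (capped; still runs soon)
    short sleeper, se->vruntime = 1005.0 ms -> max(1005.0, 997.0) = 1005.0 ms (keeps its own value)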

 

7. How the rbtree is adjusted when a process's priority changes

Changing a process's priority in Linux ends up calling the low-level set_user_nice():


void set_user_nice(struct task_struct *p, long nice)
{

    dequeue_task(rq, p, 0); /* remove the process from the red-black tree */

    p->static_prio = NICE_TO_PRIO(nice); /* map nice (-20..19) to 100..139; 0..99 are real-time priorities */
    set_load_weight(p);

    enqueue_task(rq, p, 0, false);
}

set_user_nice() takes the process out of the red-black tree, adjusts its priority (and the weight corresponding to the nice value), then re-enqueues it.

set_load_weight() sets the weight corresponding to the nice value:


static void set_load_weight(struct task_struct *p)
{
    if (task_has_rt_policy(p)) {
        p->se.load.weight = 0;
        p->se.load.inv_weight = WMULT_CONST;
        return;
    }

    /*
     * SCHED_IDLE tasks get minimal weight:
     */
    if (p->policy == SCHED_IDLE) {
        p->se.load.weight = WEIGHT_IDLEPRIO;
        p->se.load.inv_weight = WMULT_IDLEPRIO;
        return;
    }

    p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
    p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}

The prio_to_weight[] array maps nice values (-20..19) to weights relative to nice 0 (1024). According to the kernel comments, each nice step corresponds to about a 10% difference in CPU share: under a given load, raising or lowering the nice value by one decreases or increases the CPU time obtained by roughly 10%. A worked example follows the array below.


static const int prio_to_weight[40] = {
 /* -20 */ 88761, 71755, 56483, 46273, 36291,
 /* -15 */ 29154, 23254, 18705, 14949, 11916,
 /* -10 */ 9548, 7620, 6100, 4904, 3906,
 /*  -5 */ 3121, 2501, 1991, 1586, 1277,
 /*   0 */ 1024, 820, 655, 526, 423,
 /*   5 */ 335, 272, 215, 172, 137,
 /*  10 */ 110, 87, 70, 56, 45,
 /*  15 */ 36, 29, 23, 18, 15,
};
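For instance, with one nice-0 task and one nice-1 task runnable together (weights taken from the array above):

    nice 0 share: 1024 / (1024 + 820) ≈ 55.5%
    nice 1 share:  820 / (1024 + 820) ≈ 44.5%

Each nice step moves the split by roughly 10 percentage points, which is where the "10% per nice level" rule of thumb comes from; the ratio between adjacent weights is about 1.25 all the way down the table.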

The calc_delta_mine() function above uses these array weights. I haven't fully figured out this conversion yet; if you understand it, pointers would be much appreciated.


/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */ 48388, 59856, 76040, 92818, 118348,
 /* -15 */ 147320, 184698, 229616, 287308, 360437,
 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
 /*  -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
 /*   0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
 /*   5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
 /*  10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
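As the comment above says, these are precomputed values of 2^32 / weight, so the division in delta * weight / lw can be replaced by a multiplication and a 32-bit right shift. A tiny sanity-check sketch, using values copied from the two arrays above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* nice -5, 0 and 5 entries from prio_to_weight[] and prio_to_wmult[] */
        uint64_t weight[] = { 3121, 1024, 335 };
        uint64_t wmult[]  = { 1376151, 4194304, 12820798 };
        int i;

        for (i = 0; i < 3; i++)
            /* each product comes out very close to 2^32 = 4294967296 */
            printf("%llu\n", (unsigned long long)(weight[i] * wmult[i]));
        return 0;
    }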

Finally, a few words on how to understand CFS's "complete fairness":
① Process types are no longer distinguished; all processes are treated fairly.
② I/O-bound processes still get fast response (sleeping processes receive vruntime compensation).
③ Higher-priority processes get more CPU time (their vruntime grows more slowly).

So CFS's complete fairness does not mean that all processes are absolutely equal and receive exactly the same CPU time. The fairness is expressed in the vruntime value: every process is measured in virtual time, and the process with the smallest vruntime is always the one allowed to run next. That looks completely fair, yet how vruntime is updated and how fast it grows differs from process to process. With this simple vruntime mechanism, CFS meets scheduling requirements that used to demand considerably more complex algorithms. Brilliant!



