/*
 * idle_balance - attempt to pull tasks onto this_cpu as it is about
 * to become idle.
 * @this_cpu: the CPU that is going idle
 * @this_rq:  run queue of @this_cpu (caller holds this_rq->lock)
 *
 * Called with this_rq->lock held and IRQs disabled.  The lock is
 * dropped around the load_balance() calls and re-taken before return.
 *
 * NOTE(review): the original annotation pasted the SD_* flag #define
 * table into a comment; since C comments do not nest, that made the
 * #defines live code and left a stray closing comment token (compile
 * error).  The pasted table has been removed — see include/linux/sched.h
 * for the authoritative SD_* flag definitions.
 */
static void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	/* Record the moment this run queue went idle. */
	this_rq->idle_stamp = this_rq->clock;

	/*
	 * If the expected idle period (avg_idle) is shorter than the
	 * cost of migrating a task here (sysctl_sched_migration_cost),
	 * pulling work would cost more cycles than simply waiting for
	 * the next local wakeup — bail out early.
	 */
	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	/* Walk the domain hierarchy from this CPU's base domain upward. */
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
		int balance = 1;

		/* Skip domains not participating in load balancing. */
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * Only domains flagged SD_BALANCE_NEWIDLE permit
		 * balancing when a CPU is about to go idle: try to pull
		 * tasks from a busier CPU within this domain.
		 */
		if (sd->flags & SD_BALANCE_NEWIDLE) {
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance(this_cpu, this_rq,
						sd, CPU_NEWLY_IDLE, &balance);
		}

		/* Track the earliest rebalance deadline over all domains. */
		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task) {
			/* Work arrived — this rq is no longer idle. */
			this_rq->idle_stamp = 0;
			break;
		}
	}

	raw_spin_lock(&this_rq->lock);

	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}