author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-04-20 16:57:22 +0200
committer	Ingo Molnar <mingo@kernel.org>	2012-05-09 15:00:51 +0200
commit	c22402a2f76e88b04b7a8b6c0597ad9ba6fd71de (patch)
tree	dba3b3dc85c1290310968ce78d36ccb9fd670366 /kernel/sched
parent	c82513e513556a04f81aa511cd890acd23349c48 (diff)
sched/fair: Let minimally loaded cpu balance the group
Currently we let the leftmost (or first idle) cpu ascend the
sched_domain tree and perform load-balancing. The result is that the
busiest cpu in the group might end up performing this function and
pulling more load to itself. The next load balance pass will then try
to equalize this again.

Change this to pick the least loaded cpu to perform higher domain
balancing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-v8zlrmgmkne3bkcy9dej1fvm@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
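To illustrate the policy change outside the kernel, here is a minimal
userspace sketch. The cpu_load[] sample data, NR_CPUS, cpu_is_idle()
and the pick_balance_cpu_*() helpers are all hypothetical stand-ins
for the kernel's per-cpu statistics, target_load() and idle_cpu();
only the selection logic mirrors the patch below.

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical load samples; cpu 0 happens to be the busiest. */
static unsigned long cpu_load[NR_CPUS] = { 750, 310, 120, 430 };

static int cpu_is_idle(int cpu)
{
	return cpu_load[cpu] == 0;
}

/* Old policy: the first idle cpu wins, otherwise the leftmost cpu. */
static int pick_balance_cpu_old(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_is_idle(cpu))
			return cpu;
	return 0;			/* leftmost: here the busiest */
}

/* New policy: track the minimum, as the patch does with balance_load. */
static int pick_balance_cpu_new(void)
{
	unsigned long balance_load = ~0UL;
	int balance_cpu = -1, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long load = cpu_load[cpu];

		if (load < balance_load || cpu_is_idle(cpu)) {
			balance_load = load;
			balance_cpu = cpu;
		}
	}
	return balance_cpu;
}

int main(void)
{
	/* With no cpu idle, the old policy picks cpu 0 (the busiest),
	 * while the new policy picks cpu 2 (the least loaded). */
	printf("old: cpu %d, new: cpu %d\n",
	       pick_balance_cpu_old(), pick_balance_cpu_new());
	return 0;
}

With these sample loads the old policy hands the balancing duty to the
busiest cpu, which is exactly the pathology the changelog describes.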
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 678966ca393..968ffee2472 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3781,7 +3781,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 {
 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
 	int i;
-	unsigned int balance_cpu = -1, first_idle_cpu = 0;
+	unsigned int balance_cpu = -1;
+	unsigned long balance_load = ~0UL;
 	unsigned long avg_load_per_task = 0;
 
 	if (local_group)
@@ -3797,12 +3798,11 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group) {
-			if (idle_cpu(i) && !first_idle_cpu) {
-				first_idle_cpu = 1;
+			load = target_load(i, load_idx);
+			if (load < balance_load || idle_cpu(i)) {
+				balance_load = load;
 				balance_cpu = i;
 			}
-
-			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
 			if (load > max_cpu_load) {
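A note on the new condition (a reading of the code, not something the
changelog spells out): target_load() is biased high and can report a
stale, nonzero figure for a cpu that has only just gone idle, so
"load < balance_load" alone might pass over an idle cpu. The
"|| idle_cpu(i)" clause preserves the old preference for idle cpus by
letting any currently idle cpu take over balance_cpu regardless of its
reported load.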