sched: fixlet for group load balance
We should not correct the increment for the initial group only; to be
consistent, do so for every group we encounter while walking up the
hierarchy. Move the more_w adjustment inside the for_each_sched_entity()
loop, so that each level folds in the weight delta of its own queue.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
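---

For readers who want to poke at the numbers outside the kernel, below is
a minimal userspace model of what effective_load() computes after this
patch. "struct level", the helper name and the sample figures are made
up for illustration; the real code walks the sched_entity parent chain
with for_each_sched_entity() and reads the cfs_rq fields directly, and I
am assuming rw is loaded from my_q->rq_weight as in the surrounding
kernel code (not visible in the hunks below).

/*
 * Hypothetical userspace model of effective_load() after this patch.
 * "struct level" and the sample numbers are made up; the kernel walks
 * the sched_entity parent chain instead of an array.
 */
#include <stdio.h>

struct level {
	long tg_shares;		/* S:  tg->shares, total group shares      */
	long shares;		/* s:  my_q->shares, this cpu's slice of S */
	long rq_weight;		/* rw: my_q->rq_weight, last-update weight */
	long weight;		/* my_q->load.weight, current queue weight */
};

static long effective_load_model(const struct level *lv, int n,
				 long wl, long wg)
{
	int i;

	for (i = 0; i < n; i++) {
		long S = lv[i].tg_shares;
		long s = lv[i].shares;
		long rw = lv[i].rq_weight;
		long a, b, more_w;

		/*
		 * The point of the patch: fold the not-yet-propagated
		 * weight delta into wl/wg at every level, not just once
		 * before the loop.
		 */
		more_w = lv[i].weight - lv[i].rq_weight;
		wl += more_w;
		wg += more_w;

		a = S * (rw + wl);
		b = S * rw + s * wg;

		wl = s * (a - b);
		if (b)			/* same guard as the new kernel code */
			wl /= b;

		/* the group itself is already accounted for in rw */
		wg = 0;
	}

	return wl;
}

int main(void)
{
	struct level hier[] = {
		{ .tg_shares = 1024, .shares = 512, .rq_weight = 2048, .weight = 2048 },
		{ .tg_shares = 1024, .shares = 256, .rq_weight = 4096, .weight = 4160 },
	};

	printf("effective wl = %ld\n",
	       effective_load_model(hier, 2, 1024, 1024));
	return 0;
}

With the pre-patch code only the first iteration would see more_w; here
every level folds in its own delta before propagating wl upward.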
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c208997..0c59da7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1027,7 +1027,6 @@
 		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
-	long more_w;
 
 	if (!tg->parent)
 		return wl;
@@ -1039,18 +1038,17 @@
 	if (!wl && sched_feat(ASYM_EFF_LOAD))
 		return wl;
 
-	/*
-	 * Instead of using this increment, also add the difference
-	 * between when the shares were last updated and now.
-	 */
-	more_w = se->my_q->load.weight - se->my_q->rq_weight;
-	wl += more_w;
-	wg += more_w;
-
 	for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
-
 		long S, rw, s, a, b;
+		long more_w;
+
+		/*
+		 * Instead of using this increment, also add the difference
+		 * between when the shares were last updated and now.
+		 */
+		more_w = se->my_q->load.weight - se->my_q->rq_weight;
+		wl += more_w;
+		wg += more_w;
 
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
@@ -1059,7 +1057,11 @@
 		a = S*(rw + wl);
 		b = S*rw + s*wg;
 
-		wl = s*(a-b)/D(b);
+		wl = s*(a-b);
+
+		if (likely(b))
+			wl /= b;
+
 		/*
 		 * Assume the group is already running and will
 		 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@
 		 * alter the group weight.
 		 */
 		wg = 0;
-#undef D
 	}
 
 	return wl;
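
Two notes on the last two hunks, for reviewers tracing the arithmetic.
The per-level step computes the change in this cpu's slice of the
group's shares: with s = S*rw/sum_rw, adding wl to this queue and wg to
the group total gives

  s' = S*(rw + wl) / (sum_rw + wg)
     = s*S*(rw + wl) / (S*rw + s*wg)	[using sum_rw = S*rw/s]
     = s*a/b

so the delta propagated to the next level is s' - s = s*(a - b)/b, which
is exactly the wl the loop computes. And the D(b) hunk is behaviour
preserving: the old macro divided by 1 when b == 0, leaving
wl = s*(a - b), which is also what the new code leaves when it skips the
division; the hunk only gets rid of the in-loop #define/#undef pair. A
quick standalone check (a hypothetical test harness, not kernel code):

#include <assert.h>
#include <stdio.h>

#define D(n) ((n) ? (n) : 1)	/* old helper, minus the kernel's likely() */

static long old_step(long s, long a, long b)
{
	return s * (a - b) / D(b);
}

static long new_step(long s, long a, long b)
{
	long wl = s * (a - b);

	if (b)
		wl /= b;
	return wl;
}

int main(void)
{
	long a, b;

	/* sweep a grid of values, including b == 0 */
	for (a = -4096; a <= 4096; a += 512)
		for (b = -4096; b <= 4096; b += 512)
			assert(old_step(512, a, b) == new_step(512, a, b));

	puts("old D(b) form and new guard agree on all samples");
	return 0;
}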