sched: wake-balance fixes
We have logic to detect whether the system has migratable RT tasks, but it
is not consulted when deciding whether to push tasks away. Track the
overload state on the runqueue itself (rq->rt.overloaded) and check it
before attempting a push, both in the post-schedule balance path and at
wakeup time.
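For reference, a minimal sketch of how the flag is meant to be consumed
(not part of the patch; the struct layouts below are simplified stand-ins
for the real declarations in kernel/sched.c):

    /* Simplified stand-ins for the real runqueue types. */
    struct rt_rq {
            int overloaded;         /* more than one runnable RT task queued */
    };

    struct rq {
            struct rt_rq rt;
    };

    /* Overload bookkeeping; the kernel also updates rt_overload_mask here. */
    static void rt_set_overload(struct rq *rq)   { rq->rt.overloaded = 1; }
    static void rt_clear_overload(struct rq *rq) { rq->rt.overloaded = 0; }

    /* Push path: bail out early when there is nothing worth migrating. */
    static int push_rt_task(struct rq *rq)
    {
            if (!rq->rt.overloaded)
                    return 0;
            /* ... pick_next_highest_task_rt() and migrate it as before ... */
            return 1;
    }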
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d44..87d7b3ff 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@
}
static inline void rt_set_overload(struct rq *rq)
{
+ rq->rt.overloaded = 1;
cpu_set(rq->cpu, rt_overload_mask);
/*
* Make sure the mask is visible before we set
@@ -32,6 +33,7 @@
/* the order here really doesn't matter */
atomic_dec(&rto_count);
cpu_clear(rq->cpu, rt_overload_mask);
+ rq->rt.overloaded = 0;
}
static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@
assert_spin_locked(&rq->lock);
+ if (!rq->rt.overloaded)
+ return 0;
+
next_task = pick_next_highest_task_rt(rq, -1);
if (!next_task)
return 0;
@@ -675,7 +680,7 @@
* the lock was owned by prev, we need to release it
* first via finish_lock_switch and then reaquire it here.
*/
- if (unlikely(rq->rt.rt_nr_running > 1)) {
+ if (unlikely(rq->rt.overloaded)) {
spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@
{
if (unlikely(rt_task(p)) &&
!task_running(rq, p) &&
- (p->prio >= rq->curr->prio))
+ (p->prio >= rq->rt.highest_prio) &&
+ rq->rt.overloaded)
push_rt_tasks(rq);
}