sched: wake-balance fixes
author    Gregory Haskins <ghaskins@novell.com>
          Fri, 25 Jan 2008 20:08:12 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 25 Jan 2008 20:08:12 +0000 (21:08 +0100)
We have logic to detect whether the system has migratable tasks, but we are
not using it when deciding whether to push tasks away.  So we add support
for considering this new information.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
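
The idea, in isolation: each runqueue keeps an "overloaded" flag that is set
only when it holds RT tasks another CPU could usefully take, and the push
path bails out early whenever that flag is clear. Below is a minimal
standalone C sketch of that gating, not kernel code; the struct layout, the
condition in update_rt_migration(), and the helper bodies here are simplified
assumptions for illustration only.

/*
 * Standalone sketch: an rq counts as "overloaded" when it has more than one
 * runnable RT task and at least one of them is allowed to migrate.
 * push_rt_task() refuses to search for work unless that is the case.
 */
#include <stdio.h>

struct rt_rq {
        unsigned long rt_nr_running;    /* runnable RT tasks on this rq     */
        unsigned long rt_nr_migratory;  /* of those, how many can migrate   */
        int highest_prio;               /* highest queued rt task prio      */
        int overloaded;                 /* is there anything worth pushing? */
};

struct rq {
        int cpu;
        struct rt_rq rt;
};

static void rt_set_overload(struct rq *rq)
{
        rq->rt.overloaded = 1;
}

static void rt_clear_overload(struct rq *rq)
{
        rq->rt.overloaded = 0;
}

/* Recompute the flag whenever the RT task counts change (assumed condition). */
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && rq->rt.rt_nr_running > 1)
                rt_set_overload(rq);
        else
                rt_clear_overload(rq);
}

/* The point of the patch: skip the search for a pushable task entirely
 * when the rq is known not to be overloaded. */
static int push_rt_task(struct rq *rq)
{
        if (!rq->rt.overloaded)
                return 0;

        printf("cpu %d: looking for an RT task to push\n", rq->cpu);
        return 1;
}

int main(void)
{
        struct rq rq = { .cpu = 0 };

        /* A single, CPU-bound RT task: nothing to push, the push is a no-op. */
        rq.rt.rt_nr_running = 1;
        rq.rt.rt_nr_migratory = 0;
        update_rt_migration(&rq);
        printf("pushed: %d\n", push_rt_task(&rq));

        /* Two RT tasks, one of them migratory: the rq is overloaded. */
        rq.rt.rt_nr_running = 2;
        rq.rt.rt_nr_migratory = 1;
        update_rt_migration(&rq);
        printf("pushed: %d\n", push_rt_task(&rq));

        return 0;
}

In the actual patch the per-rq flag is maintained alongside the existing
rt_overload CPU mask, so the schedule-tail and wakeup paths below can skip
both the rq->lock acquisition and the runqueue scan when there is nothing
worth migrating.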
kernel/sched.c
kernel/sched_rt.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 3344ba776b9711d96ca3b43f96a7e47321f8b2fa..c591abd9ca38427e6247e554e8c49fdb8848d310 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -346,6 +346,7 @@ struct rt_rq {
        unsigned long rt_nr_migratory;
        /* highest queued rt task prio */
        int highest_prio;
+       int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
                rq->rt.highest_prio = MAX_RT_PRIO;
+               rq->rt.overloaded = 0;
 #endif
                atomic_set(&rq->nr_iowait, 0);
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d4408160c405f7562913adaad4a10a1e344c..87d7b3ff38615436d9d5625350f16badfde78bfe 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+       rq->rt.overloaded = 1;
        cpu_set(rq->cpu, rt_overload_mask);
        /*
         * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
        /* the order here really doesn't matter */
        atomic_dec(&rto_count);
        cpu_clear(rq->cpu, rt_overload_mask);
+       rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
        assert_spin_locked(&rq->lock);
 
+       if (!rq->rt.overloaded)
+               return 0;
+
        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reaquire it here.
         */
-       if (unlikely(rq->rt.rt_nr_running > 1)) {
+       if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
-           (p->prio >= rq->curr->prio))
+           (p->prio >= rq->rt.highest_prio) &&
+           rq->rt.overloaded)
                push_rt_tasks(rq);
 }