sched: fix overload performance: buddy wakeups
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 14 Mar 2008 20:12:12 +0000 (21:12 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sat, 15 Mar 2008 02:02:50 +0000 (03:02 +0100)
Currently we schedule to the leftmost task in the runqueue. When the
runtimes are very short because of some server/client ping-pong,
especially in over-saturated workloads, this will cycle through all
tasks, thrashing the cache.

Reduce cache thrashing by keeping dependent tasks together, running
newly woken tasks first. However, by not running the leftmost task first
we could starve tasks, because the wakee can gain unlimited runtime.

Therefore we only run the wakee if it is within a small
(wakeup_granularity) window of the leftmost task. This preserves
fairness while still alternating between server/client task groups.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
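
The window check described above is small enough to model outside the
kernel. Below is a minimal userspace sketch whose names mirror pick_next()
in the patch; the stripped-down entity type, the example vruntime values
and the fixed 10 ms granularity are illustrative assumptions, and the
load-based scaling the kernel does via calc_delta_fair() is omitted.

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/* Stripped-down stand-in for struct sched_entity (illustrative only). */
struct entity {
	const char *name;
	u64 vruntime;		/* virtual runtime, in nanoseconds */
};

/*
 * Pick between the leftmost entity in the tree ('se') and the last-woken
 * buddy ('next'): prefer the buddy only if its vruntime does not exceed
 * the leftmost's by more than the wakeup granularity.
 */
static struct entity *
pick_next(struct entity *se, struct entity *next, s64 gran)
{
	s64 diff;

	if (!next)
		return se;

	diff = (s64)(next->vruntime - se->vruntime);
	if (diff < 0)		/* as in the patch: only take a buddy at or beyond the leftmost */
		return se;
	if (diff > gran)	/* buddy too far ahead in vruntime: would hurt fairness */
		return se;

	return next;		/* close enough: keep the buddies together */
}

int main(void)
{
	struct entity leftmost = { "leftmost", 1000000 };
	struct entity wakee    = { "wakee",    1003000 };
	s64 gran = 10000000;	/* assume a 10 ms wakeup granularity, in ns */

	/* the wakee is only 3 us ahead of the leftmost, well within the window */
	printf("run: %s\n", pick_next(&leftmost, &wakee, gran)->name);
	return 0;
}

With these values the wakee is picked; if its vruntime exceeded the
leftmost's by more than gran, pick_next() would fall back to the leftmost
task, which is what preserves fairness.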
kernel/sched.c
kernel/sched_fair.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 6b06f23261c0c37e5e616319bc0c76deed64affa..d1ad69b270ca7abc418871ce3160cc4bfbc04135 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct cfs_rq {
        /* 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
-       struct sched_entity *curr;
+       struct sched_entity *curr, *next;
 
        unsigned long nr_spread_over;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9d003c9d2a48090a2210d9fff456a94a66efb585..31c4a2988b64a0644e9e8289a8a6233c7b25f185 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -207,6 +207,9 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                }
        }
 
+       if (cfs_rq->next == se)
+               cfs_rq->next = NULL;
+
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
@@ -626,12 +629,32 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static struct sched_entity *
+pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       s64 diff, gran;
+
+       if (!cfs_rq->next)
+               return se;
+
+       diff = cfs_rq->next->vruntime - se->vruntime;
+       if (diff < 0)
+               return se;
+
+       gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
+       if (diff > gran)
+               return se;
+
+       return cfs_rq->next;
+}
+
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *se = NULL;
 
        if (first_fair(cfs_rq)) {
                se = __pick_next_entity(cfs_rq);
+               se = pick_next(cfs_rq, se);
                set_next_entity(cfs_rq, se);
        }
 
@@ -1070,6 +1093,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                resched_task(curr);
                return;
        }
+
+       cfs_rq_of(pse)->next = pse;
+
        /*
         * Batch tasks do not preempt (their preemption is driven by
         * the tick):