sched: RT-balance, replace hooks with pre/post schedule and wakeup methods
author Steven Rostedt <rostedt@goodmis.org>
Fri, 25 Jan 2008 20:08:22 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:22 +0000 (21:08 +0100)
To make the main sched.c code more agnostic to the scheduling classes, the
RT-class-specific balancing hooks in the schedule code are replaced with
pre_schedule, post_schedule and task_wake_up methods. These methods may be
used by any scheduling class, but currently only the sched_rt class
implements them.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
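
The change boils down to a NULL-checked hook table: the core scheduler only
knows that a class *may* provide pre_schedule/post_schedule/task_wake_up and
calls them when present. A minimal user-space C sketch of that dispatch
pattern (toy_sched_class, demo_post_schedule and the opaque rq stand-in are
illustrative names, not the kernel API):

    #include <stdio.h>

    struct rq;                          /* opaque stand-in for the run-queue */

    struct toy_sched_class {
            /* optional hooks; a class may leave any of them NULL */
            void (*pre_schedule)(struct rq *rq);
            void (*post_schedule)(struct rq *rq);
            void (*task_wake_up)(struct rq *rq);
    };

    static void demo_post_schedule(struct rq *rq)
    {
            (void)rq;
            printf("post_schedule: push excess RT tasks to other CPUs\n");
    }

    /* only post_schedule is implemented; the other hooks stay NULL */
    static const struct toy_sched_class toy_rt_class = {
            .post_schedule = demo_post_schedule,
    };

    /* core code stays class-agnostic: call a hook only if the class set it */
    static void finish_switch(const struct toy_sched_class *class, struct rq *rq)
    {
            if (class->post_schedule)
                    class->post_schedule(rq);
    }

    int main(void)
    {
            finish_switch(&toy_rt_class, NULL);   /* prints the demo message */
            return 0;
    }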
include/linux/sched.h
kernel/sched.c
kernel/sched_rt.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2e69f19369e4d863878a4d5437611cec01247c6e..c67d2c2f0111754e76b824dba2d05df8fda981c1 100644
@@ -843,6 +843,9 @@ struct sched_class {
        int (*move_one_task) (struct rq *this_rq, int this_cpu,
                              struct rq *busiest, struct sched_domain *sd,
                              enum cpu_idle_type idle);
+       void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+       void (*post_schedule) (struct rq *this_rq);
+       void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 #endif
 
        void (*set_curr_task) (struct rq *rq);
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d6fb731559b07106bfcd08ccb4190937c7b9a66..2368a0d882e3c9c06034a2a1b26024650dd14f3e 100644
@@ -1625,7 +1625,10 @@ out_activate:
 
 out_running:
        p->state = TASK_RUNNING;
-       wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
 out:
        task_rq_unlock(rq, &flags);
 
@@ -1748,7 +1751,10 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                inc_nr_running(p, rq);
        }
        check_preempt_curr(rq, p);
-       wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
        task_rq_unlock(rq, &flags);
 }
 
@@ -1869,7 +1875,10 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
-       schedule_tail_balance_rt(rq);
+#ifdef CONFIG_SMP
+       if (current->sched_class->post_schedule)
+               current->sched_class->post_schedule(rq);
+#endif
 
        fire_sched_in_preempt_notifiers(current);
        if (mm)
@@ -3638,7 +3647,10 @@ need_resched_nonpreemptible:
                switch_count = &prev->nvcsw;
        }
 
-       schedule_balance_rt(rq, prev);
+#ifdef CONFIG_SMP
+       if (prev->sched_class->pre_schedule)
+               prev->sched_class->pre_schedule(rq, prev);
+#endif
 
        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3ea0cae513d2e7210e3f22d41529a271e60325f3..a5a45104603a442d2d58742a09089abd4339fa77 100644
@@ -689,14 +689,14 @@ static int pull_rt_task(struct rq *this_rq)
        return ret;
 }
 
-static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
+static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
 }
 
-static void schedule_tail_balance_rt(struct rq *rq)
+static void post_schedule_rt(struct rq *rq)
 {
        /*
         * If we have more than one rt_task queued, then
@@ -713,10 +713,9 @@ static void schedule_tail_balance_rt(struct rq *rq)
 }
 
 
-static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
-       if (unlikely(rt_task(p)) &&
-           !task_running(rq, p) &&
+       if (!task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
@@ -780,11 +779,6 @@ static void leave_domain_rt(struct rq *rq)
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
 }
-
-#else /* CONFIG_SMP */
-# define schedule_tail_balance_rt(rq)  do { } while (0)
-# define schedule_balance_rt(rq, prev) do { } while (0)
-# define wakeup_balance_rt(rq, p)      do { } while (0)
 #endif /* CONFIG_SMP */
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
@@ -840,6 +834,9 @@ const struct sched_class rt_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
+       .pre_schedule           = pre_schedule_rt,
+       .post_schedule          = post_schedule_rt,
+       .task_wake_up           = task_wake_up_rt,
 #endif
 
        .set_curr_task          = set_curr_task_rt,