From 4df64c0bfb7e0e260d10ebc005f7d0ba1308eed7 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 25 Jan 2008 21:08:15 +0100
Subject: [PATCH] sched: clean up find_lock_lowest_rq()

clean up find_lock_lowest_rq().

Signed-off-by: Ingo Molnar
---
 kernel/sched_rt.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b8435fd47f7..0749c1837b1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -438,12 +438,11 @@ static int find_lowest_rq(struct task_struct *task)
 }
 
 /* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *rq)
+static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 {
 	struct rq *lowest_rq = NULL;
-	int cpu;
 	int tries;
+	int cpu;
 
 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
 		cpu = find_lowest_rq(task);
@@ -462,9 +461,11 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
 			if (unlikely(task_rq(task) != rq ||
-				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
+				     !cpu_isset(lowest_rq->cpu,
+						task->cpus_allowed) ||
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
+
 				spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
 				break;
-- 
2.41.1