Preempt-RCU: fix rcu_barrier for preemptive environment.
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Fri, 25 Jan 2008 20:08:24 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:24 +0000 (21:08 +0100)
Fix rcu_barrier() to work properly in a preemptive kernel environment.
Also, the ordering of callbacks must be preserved while moving
callbacks to another CPU during CPU hotplug.
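
For context, rcu_barrier() is typically called to drain every pending
call_rcu() callback before freeing the data structures those callbacks
touch.  A minimal, purely illustrative caller (the cache and function
names below are hypothetical, not part of this patch):

	/* Teardown sketch: no new call_rcu() callbacks are queued
	 * past this point. */
	static void example_exit(void)
	{
		rcu_barrier();	/* wait for all queued callbacks to run */
		kmem_cache_destroy(example_cachep);	/* now safe to tear down */
	}

If rcu_barrier() can return before every callback has run, the sketch
above destroys a cache that a still-pending callback may reference.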

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/rcuclassic.c
kernel/rcupdate.c

index 18369e3386e23f49095648bb4937ef9efaf5ac43..ce0cf16cab67436037ae3eceac791bb13c5090f3 100644 (file)
@@ -371,9 +371,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
+       rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-       rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 }
 
 static void rcu_offline_cpu(int cpu)
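
The donelist batch is now spliced before curlist and nxtlist because
rcu_move_batch() appends each batch to the tail of the surviving CPU's
nxtlist.  A sketch of that helper, shown only to make the ordering
argument concrete (the body is paraphrased from kernel/rcuclassic.c of
this era and may differ in detail):

	static void rcu_move_batch(struct rcu_data *this_rdp,
				   struct rcu_head *list, struct rcu_head **tail)
	{
		local_irq_disable();
		*this_rdp->nxttail = list;	/* append whole batch at tail */
		if (list)
			this_rdp->nxttail = tail;	/* advance tail if non-empty */
		local_irq_enable();
	}

Splicing in done -> cur -> nxt order keeps the offlined CPU's callbacks
ordered oldest-first, which is the callback ordering the changelog says
must be preserved.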
index 0ccd0095ebdc574693556388b8e38edc622eb2a3..760dfc233a00ae103fdce2bfe335ad9b14ca39cc 100644 (file)
@@ -115,7 +115,17 @@ void rcu_barrier(void)
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        atomic_set(&rcu_barrier_cpu_count, 0);
+       /*
+        * The queueing of callbacks in all CPUs must be atomic with
+        * respect to RCU, otherwise one CPU may queue a callback,
+        * wait for a grace period, decrement barrier count and call
+        * complete(), while other CPUs have not yet queued anything.
+        * So, we need to make sure that grace periods cannot complete
+        * until all the callbacks are queued.
+        */
+       rcu_read_lock();
        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+       rcu_read_unlock();
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
 }
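
The race being closed here: on_each_cpu() runs rcu_barrier_func() on
every CPU, and each invocation increments rcu_barrier_cpu_count and
queues a callback that decrements it and signals completion.  Roughly
(paraphrased from kernel/rcupdate.c of this era, shown only to
illustrate the race):

	static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head);

	static void rcu_barrier_callback(struct rcu_head *notused)
	{
		/* Last callback to run signals rcu_barrier(). */
		if (atomic_dec_and_test(&rcu_barrier_cpu_count))
			complete(&rcu_barrier_completion);
	}

	static void rcu_barrier_func(void *notused)
	{
		struct rcu_head *head = &per_cpu(rcu_barrier_head, smp_processor_id());

		atomic_inc(&rcu_barrier_cpu_count);
		call_rcu(head, rcu_barrier_callback);
	}

Without the read-side critical section, the first CPU's callback could
be invoked after a grace period, drop the count back to zero and call
complete() before the remaining CPUs had queued theirs.  Classic RCU is
not exposed to this because on_each_cpu() runs with preemption
disabled, which already blocks grace periods; preemptible RCU is not
blocked by preempt_disable(), so an explicit rcu_read_lock() /
rcu_read_unlock() around the queueing phase is required.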