SELinux fixups needed for preemptable RCU from -rt
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 22 Apr 2008 01:12:33 +0000 (18:12 -0700)
committer  James Morris <jmorris@namei.org>
           Tue, 22 Apr 2008 05:37:23 +0000 (15:37 +1000)
This patch needs to move from -rt to mainline, given preemptable RCU.
This patch fixes SELinux code that implicitly assumes that disabling
preemption prevents an RCU grace period from completing, an assumption that
is valid for Classic RCU, but not necessarily for preemptable RCU.  Explicit
rcu_read_lock() calls are thus added.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
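
As background for the hunks below: a minimal, self-contained sketch of the
avc.c pattern, with hypothetical demo_* names standing in for the actual AVC
code. A list walk that keeps advancing past nodes already handed to
call_rcu() is only safe inside an explicit RCU read-side critical section,
because with preemptable RCU the irq-disabling spinlock no longer holds off
grace periods:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Hypothetical single-slot cache mirroring the avc_cache shape. */
	struct demo_node {
		struct list_head list;
		struct rcu_head rhead;
	};

	static LIST_HEAD(demo_slot);
	static DEFINE_SPINLOCK(demo_slot_lock);

	static void demo_node_free(struct rcu_head *rhead)
	{
		kfree(container_of(rhead, struct demo_node, rhead));
	}

	static void demo_node_delete(struct demo_node *node)
	{
		list_del_rcu(&node->list);
		call_rcu(&node->rhead, demo_node_free);
	}

	static void demo_flush_slot(void)
	{
		struct demo_node *node;
		unsigned long flags;

		spin_lock_irqsave(&demo_slot_lock, flags);
		/*
		 * The walk advances through nodes that have just been
		 * list_del_rcu()ed and queued with call_rcu().  Under
		 * Classic RCU the irq-disabling spinlock held off grace
		 * periods; under preemptable RCU only an explicit
		 * read-side critical section keeps those nodes from
		 * being freed mid-walk.
		 */
		rcu_read_lock();
		list_for_each_entry(node, &demo_slot, list)
			demo_node_delete(node);
		rcu_read_unlock();
		spin_unlock_irqrestore(&demo_slot_lock, flags);
	}

The same reasoning applies per hash slot in avc_reclaim_node() and
avc_ss_reset() below.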
security/selinux/avc.c
security/selinux/netif.c

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 1d69f6649bff81a8ac9bdc87d4a2bdc38c5e0763..95a8ef4a5073fbcde9f2fcec8a5301cf7db27eb2 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -312,6 +312,7 @@ static inline int avc_reclaim_node(void)
                if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
                        continue;
 
+               rcu_read_lock();
                list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
                        if (atomic_dec_and_test(&node->ae.used)) {
                                /* Recently Unused */
@@ -319,11 +320,13 @@ static inline int avc_reclaim_node(void)
                                avc_cache_stats_incr(reclaims);
                                ecx++;
                                if (ecx >= AVC_CACHE_RECLAIM) {
+                                       rcu_read_unlock();
                                        spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
                                        goto out;
                                }
                        }
                }
+               rcu_read_unlock();
                spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
        }
 out:
@@ -821,8 +824,14 @@ int avc_ss_reset(u32 seqno)
 
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+               /*
+                * With preemptable RCU, the outer spinlock does not
+                * prevent RCU grace periods from ending.
+                */
+               rcu_read_lock();
                list_for_each_entry(node, &avc_cache.slots[i], list)
                        avc_node_delete(node);
+               rcu_read_unlock();
                spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
        }
 
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index c658b84c31966b177e3c9cb3e980c5ad667898af..b4e14bc0bf32bf6d0a9ae948d697912ca11e8769 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -239,11 +239,13 @@ static void sel_netif_kill(int ifindex)
 {
        struct sel_netif *netif;
 
+       rcu_read_lock();
        spin_lock_bh(&sel_netif_lock);
        netif = sel_netif_find(ifindex);
        if (netif)
                sel_netif_destroy(netif);
        spin_unlock_bh(&sel_netif_lock);
+       rcu_read_unlock();
 }
 
 /**
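
The netif.c hunk nests the locks the other way around: rcu_read_lock() is
taken outside spin_lock_bh(), so the entry located under the lock remains
valid while it is being torn down. A sketch of that shape, again with
hypothetical demo_* names rather than the actual sel_netif helpers:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Hypothetical interface table mirroring the sel_netif shape. */
	struct demo_if {
		int ifindex;
		struct list_head list;
		struct rcu_head rhead;
	};

	static LIST_HEAD(demo_if_list);
	static DEFINE_SPINLOCK(demo_if_lock);

	static void demo_if_free(struct rcu_head *rhead)
	{
		kfree(container_of(rhead, struct demo_if, rhead));
	}

	/* Caller must hold rcu_read_lock(); the returned entry is only
	 * guaranteed to live until rcu_read_unlock(). */
	static struct demo_if *demo_if_find(int ifindex)
	{
		struct demo_if *entry;

		list_for_each_entry_rcu(entry, &demo_if_list, list)
			if (entry->ifindex == ifindex)
				return entry;
		return NULL;
	}

	static void demo_if_kill(int ifindex)
	{
		struct demo_if *entry;

		/*
		 * Take the RCU read lock before the spinlock: with
		 * preemptable RCU, spin_lock_bh() by itself no longer
		 * pins the entry that demo_if_find() returns.
		 */
		rcu_read_lock();
		spin_lock_bh(&demo_if_lock);
		entry = demo_if_find(ifindex);
		if (entry) {
			list_del_rcu(&entry->list);
			call_rcu(&entry->rhead, demo_if_free);
		}
		spin_unlock_bh(&demo_if_lock);
		rcu_read_unlock();
	}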