uids: merge multiple error paths in alloc_uid() into one
author		Pavel Emelyanov <xemul@openvz.org>
		Fri, 25 Jan 2008 20:08:26 +0000 (21:08 +0100)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 25 Jan 2008 20:08:26 +0000 (21:08 +0100)
There are already 4 error paths in alloc_uid() that do incremental rollbacks.
I think it's time to merge them.  This saves us 8 lines of code :)

Maybe it would be better to merge this patch with the previous one, but some
time ago I sent a similar patch (fixing an error path and cleaning it up) and
was told to make two separate patches in such cases.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
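
For illustration, a minimal sketch of the goto-based unwind idiom this patch
applies: each failure jumps to a label that rolls back only what has been set
up so far, in reverse order, so every rollback statement appears exactly once.
The struct and helpers below (struct thing, setup_a, undo_a, setup_b) are
hypothetical stand-ins, not the kernel/user.c functions touched by the diff.

#include <stdlib.h>

struct thing {
	int a;
	int b;
};

/* Hypothetical setup/teardown helpers standing in for the real ones. */
static int setup_a(struct thing *t) { t->a = 1; return 0; }
static void undo_a(struct thing *t) { t->a = 0; }
static int setup_b(struct thing *t) { t->b = 2; return 0; }

struct thing *alloc_thing(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto out;		/* nothing to roll back yet */

	if (setup_a(t) < 0)
		goto out_free;		/* undo the allocation only */

	if (setup_b(t) < 0)
		goto out_undo_a;	/* undo in reverse order of setup */

	return t;

out_undo_a:
	undo_a(t);
out_free:
	free(t);
out:
	return NULL;
}

Adding a later setup step then only requires one new label and one new
rollback line, instead of repeating the whole cleanup sequence at each
error check.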
kernel/user.c

index ab4fd706993b1e22dccae4d3fafcf0cf0f48fbb3..bc1c48d35cb32726623b37b47a09d4325aaf4074 100644
@@ -319,7 +319,7 @@ void free_uid(struct user_struct *up)
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
        struct hlist_head *hashent = uidhashentry(ns, uid);
-       struct user_struct *up;
+       struct user_struct *up, *new;
 
        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
@@ -331,13 +331,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
        spin_unlock_irq(&uidhash_lock);
 
        if (!up) {
-               struct user_struct *new;
-
                new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-               if (!new) {
-                       uids_mutex_unlock();
-                       return NULL;
-               }
+               if (!new)
+                       goto out_unlock;
 
                new->uid = uid;
                atomic_set(&new->__count, 1);
@@ -353,28 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 #endif
                new->locked_shm = 0;
 
-               if (alloc_uid_keyring(new, current) < 0) {
-                       kmem_cache_free(uid_cachep, new);
-                       uids_mutex_unlock();
-                       return NULL;
-               }
+               if (alloc_uid_keyring(new, current) < 0)
+                       goto out_free_user;
 
-               if (sched_create_user(new) < 0) {
-                       key_put(new->uid_keyring);
-                       key_put(new->session_keyring);
-                       kmem_cache_free(uid_cachep, new);
-                       uids_mutex_unlock();
-                       return NULL;
-               }
+               if (sched_create_user(new) < 0)
+                       goto out_put_keys;
 
-               if (uids_user_create(new)) {
-                       sched_destroy_user(new);
-                       key_put(new->uid_keyring);
-                       key_put(new->session_keyring);
-                       kmem_cache_free(uid_cachep, new);
-                       uids_mutex_unlock();
-                       return NULL;
-               }
+               if (uids_user_create(new))
+                       goto out_destroy_sched;
 
                /*
                 * Before adding this, check whether we raced
@@ -402,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
        uids_mutex_unlock();
 
        return up;
+
+out_destroy_sched:
+       sched_destroy_user(new);
+out_put_keys:
+       key_put(new->uid_keyring);
+       key_put(new->session_keyring);
+out_free_user:
+       kmem_cache_free(uid_cachep, new);
+out_unlock:
+       uids_mutex_unlock();
+       return NULL;
 }
 
 void switch_uid(struct user_struct *new_user)