coredump: move mm->core_waiters into struct core_state
author    Oleg Nesterov <oleg@tv-sign.ru>
          Fri, 25 Jul 2008 08:47:41 +0000 (01:47 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 25 Jul 2008 17:53:39 +0000 (10:53 -0700)
Move mm->core_waiters into "struct core_state" allocated on stack.  This
shrinks mm_struct a little bit and allows further changes.

This patch is mostly a s/core_waiters/core_state/ rename.  The only
essential change is that coredump_wait() must clear mm->core_state
before returning.

The coredump_wait() path is uglified and .text grows by 30 bytes; this
is fixed by the next patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/exec.c
include/linux/mm_types.h
kernel/exit.c
kernel/fork.c
kernel/signal.c
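
To make the lifecycle concrete, below is a self-contained userspace model of
the handshake this patch sets up.  The names core_state, nr_threads, startup,
mm_struct, and mmap_sem mirror the patch, but everything else is an
illustrative stand-in: the kernel's completion and rwsem primitives are
emulated with a pthread mutex and condition variable, the thread count is
fixed instead of computed by zap_threads(), and none of this is kernel code.

	/* Userspace sketch of the stack-allocated core_state handshake. */
	#include <pthread.h>
	#include <stdio.h>

	/* Stand-in for the kernel's struct completion. */
	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	static void init_completion(struct completion *c)
	{
		pthread_mutex_init(&c->lock, NULL);
		pthread_cond_init(&c->cond, NULL);
		c->done = 0;
	}

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	/* As in the patch: small, per-dump state. */
	struct core_state {
		int nr_threads;
		struct completion startup;
	};

	/* Stand-in for the one field this patch leaves in mm_struct. */
	struct mm_struct {
		pthread_mutex_t mmap_sem;	/* models mmap_sem, write side only */
		struct core_state *core_state;	/* NULL unless a dump is in progress */
	};

	static struct mm_struct mm = {
		.mmap_sem = PTHREAD_MUTEX_INITIALIZER,
		.core_state = NULL,
	};

	/* Models exit_mm(): each thread checks in; the last fires startup. */
	static void *exiting_thread(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&mm.mmap_sem);
		if (mm.core_state && !--mm.core_state->nr_threads)
			complete(&mm.core_state->startup);
		pthread_mutex_unlock(&mm.mmap_sem);
		return NULL;
	}

	int main(void)
	{
		/* Models coredump_wait(): core_state lives in this stack frame. */
		struct core_state core_state;
		pthread_t threads[4];
		int i;

		init_completion(&core_state.startup);
		core_state.nr_threads = 4;	/* zap_threads() would count these */
		mm.core_state = &core_state;	/* publish, as the patch does */

		for (i = 0; i < 4; i++)
			pthread_create(&threads[i], NULL, exiting_thread, NULL);

		/* Keeping core_state on the stack is safe: we do not leave
		 * this frame until the last thread has checked in. */
		wait_for_completion(&core_state.startup);
		mm.core_state = NULL;	/* the patch clears it before return */

		for (i = 0; i < 4; i++)
			pthread_join(threads[i], NULL);
		printf("all threads reached the handshake\n");
		return 0;
	}

The point the model demonstrates is why stack allocation is safe here:
coredump_wait() does not leave the frame that owns core_state until the last
thread has checked in, so no stale pointer can remain behind in
mm->core_state.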

index 71734568f0188f1df9cb7f21349980c4df45395e..50de3aaff4d008921a62fb54ea4cc809babeb2d7 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -722,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm)
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
-                * checking core_waiters and changing tsk->mm.  The
-                * core-inducing thread will increment core_waiters for
-                * each thread whose ->mm == old_mm.
+                * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
-               if (unlikely(old_mm->core_waiters)) {
+               if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
@@ -1514,7 +1512,7 @@ static void zap_process(struct task_struct *start)
        t = start;
        do {
                if (t != current && t->mm) {
-                       t->mm->core_waiters++;
+                       t->mm->core_state->nr_threads++;
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                }
@@ -1538,11 +1536,11 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (err)
                return err;
 
-       if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+       if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
-        * count them correctly into mm->core_waiters. We don't take tasklist
+        * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
@@ -1590,7 +1588,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        }
        rcu_read_unlock();
 done:
-       return mm->core_waiters;
+       return mm->core_state->nr_threads;
 }
 
 static int coredump_wait(int exit_code)
@@ -1603,9 +1601,12 @@ static int coredump_wait(int exit_code)
 
        init_completion(&mm->core_done);
        init_completion(&core_state.startup);
+       core_state.nr_threads = 0;
        mm->core_state = &core_state;
 
        core_waiters = zap_threads(tsk, mm, exit_code);
+       if (core_waiters < 0)
+               mm->core_state = NULL;
        up_write(&mm->mmap_sem);
 
        if (unlikely(core_waiters < 0))
@@ -1623,8 +1624,8 @@ static int coredump_wait(int exit_code)
 
        if (core_waiters)
                wait_for_completion(&core_state.startup);
+       mm->core_state = NULL;
 fail:
-       BUG_ON(mm->core_waiters);
        return core_waiters;
 }
 
@@ -1702,7 +1703,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
        /*
         * If another thread got here first, or we are not dumpable, bail out.
         */
-       if (mm->core_waiters || !get_dumpable(mm)) {
+       if (mm->core_state || !get_dumpable(mm)) {
                up_write(&mm->mmap_sem);
                goto fail;
        }
index 97819efd233348370f4d16585299989cce77bacd..c0b1747b61a5526c3aed33bb1975d46a50c7fe3b 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -160,6 +160,7 @@ struct vm_area_struct {
 };
 
 struct core_state {
+       int nr_threads;
        struct completion startup;
 };
 
@@ -179,7 +180,6 @@ struct mm_struct {
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */
-       int core_waiters;
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;             /* Protects page tables and some counters */
 
index f7fa21dbced4b17d86347582a410e97c2f5c105c..988e232254e9ace0cf4a4afae854c256102fb849 100644 (file)
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -670,16 +670,16 @@ static void exit_mm(struct task_struct * tsk)
                return;
        /*
         * Serialize with any possible pending coredump.
-        * We must hold mmap_sem around checking core_waiters
+        * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
-        * will increment core_waiters for each thread in the
+        * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
-       if (mm->core_waiters) {
+       if (mm->core_state) {
                up_read(&mm->mmap_sem);
                down_write(&mm->mmap_sem);
-               if (!--mm->core_waiters)
+               if (!--mm->core_state->nr_threads)
                        complete(&mm->core_state->startup);
                up_write(&mm->mmap_sem);
 
index eeaec6893b0d29f75a33efb7194d2b41305d7387..813d5c89b9d520393d61038edbcf65e9813ab465 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -400,7 +400,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        INIT_LIST_HEAD(&mm->mmlist);
        mm->flags = (current->mm) ? current->mm->flags
                                  : MMF_DUMP_FILTER_DEFAULT;
-       mm->core_waiters = 0;
+       mm->core_state = NULL;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
index 39c1706edf031264f1d237f3cd1127b7ceaee92c..5c7b7eaa0dc6dee6556c7dbbd5814945be87e1bd 100644 (file)
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1480,10 +1480,10 @@ static inline int may_ptrace_stop(void)
         * is a deadlock situation, and pointless because our tracer
         * is dead so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
-        * ->siglock we must see ->core_waiters != 0. Otherwise it
+        * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
         */
-       if (unlikely(current->mm->core_waiters) &&
+       if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
                return 0;