mm: fix atomic_t overflow in vm
author Alan Cox <alan@redhat.com>
Fri, 23 May 2008 20:04:31 +0000 (13:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 May 2008 16:56:09 +0000 (09:56 -0700)
The atomic_t type is 32-bit, but a 64-bit system can have more than 2^32
pages of virtual address space available.  Without this change the
vm_committed_space counter overflows on ludicrously large mappings.
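
As an illustration only (not part of the patch), a minimal standalone C sketch
of the overflow: accounting more than 2^32 pages in a 32-bit counter wraps,
while a long-wide counter on a 64-bit system does not.

/* Standalone sketch, not kernel code: shows why a 32-bit counter cannot hold
 * an accounting of more than 2^32 pages, which is what motivates the
 * atomic_t -> atomic_long_t switch in the diff below. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t committed32 = 0;               /* analogous to atomic_t */
	int64_t committed64 = 0;               /* analogous to atomic_long_t */
	int64_t pages = (1LL << 32) + 1024;    /* more than 2^32 pages */

	committed32 += (int32_t)pages;         /* truncates: the count wraps */
	committed64 += pages;                  /* accounted correctly */

	printf("32-bit counter: %d\n", committed32);
	printf("64-bit counter: %lld\n", (long long)committed64);
	return 0;
}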

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/proc_misc.c
include/linux/mman.h
mm/mmap.c
mm/nommu.c
mm/swap.c

index 74a323d2b850884f06c2206fc4d288e957c8bef6..32dc14cd890058dcdd1424a3c1d8ff5c3134c28e 100644 (file)
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -139,7 +139,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        si_meminfo(&i);
        si_swapinfo(&i);
-       committed = atomic_read(&vm_committed_space);
+       committed = atomic_long_read(&vm_committed_space);
        allowed = ((totalram_pages - hugetlb_total_pages())
                * sysctl_overcommit_ratio / 100) + total_swap_pages;
 
index 87920a0852a37e8945cdfd63bad2f6aa5f58110a..dab8892e6ff124817f7312b23e8055d3a50b2ce8 100644 (file)
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
 
 extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
-extern atomic_t vm_committed_space;
+extern atomic_long_t vm_committed_space;
 
 #ifdef CONFIG_SMP
 extern void vm_acct_memory(long pages);
 #else
 static inline void vm_acct_memory(long pages)
 {
-       atomic_add(pages, &vm_committed_space);
+       atomic_long_add(pages, &vm_committed_space);
 }
 #endif
 
index fac66337da2a3bcd3a003ba9d33096938c17fb57..669499e7c2f533291f21157da4da4978f3f6ea69 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
-       if (atomic_read(&vm_committed_space) < (long)allowed)
+       if (atomic_long_read(&vm_committed_space) < (long)allowed)
                return 0;
 error:
        vm_unacct_memory(pages);
index ef8c62cec697a06495b13f2658aff4e30000065c..dca93fcb8b7a6517ad041221343bddf61e572644 100644 (file)
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
 unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
-       if (atomic_read(&vm_committed_space) < (long)allowed)
+       if (atomic_long_read(&vm_committed_space) < (long)allowed)
                return 0;
 error:
        vm_unacct_memory(pages);
index 91e194445a5eae53ee3ae97e3f616cdcb7d442d0..45c9f25a8a3be7e2dccc6a6f807e0eaeade1e697 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
        local = &__get_cpu_var(committed_space);
        *local += pages;
        if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-               atomic_add(*local, &vm_committed_space);
+               atomic_long_add(*local, &vm_committed_space);
                *local = 0;
        }
        preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 
        committed = &per_cpu(committed_space, (long)hcpu);
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               atomic_add(*committed, &vm_committed_space);
+               atomic_long_add(*committed, &vm_committed_space);
                *committed = 0;
                drain_cpu_pagevecs((long)hcpu);
        }
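
For context on the mm/swap.c hunk above: vm_acct_memory() batches per-CPU
deltas and folds them into the global counter only once they cross
ACCT_THRESHOLD, so the wide atomic add happens rarely.  A minimal userspace
sketch of that batching idea follows; all names are illustrative and plain
longs stand in for per-CPU data and atomic_long_add().

/* Illustrative sketch of the batching done by vm_acct_memory() above;
 * not kernel code. */
#include <stdio.h>

#define ACCT_THRESHOLD 64

static long committed_space_global;        /* stands in for vm_committed_space */

static void acct_memory(long *local, long pages)
{
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		committed_space_global += *local;   /* atomic_long_add() in the kernel */
		*local = 0;
	}
}

int main(void)
{
	long cpu_local = 0;

	for (int i = 0; i < 200; i++)
		acct_memory(&cpu_local, 1);

	printf("global: %ld, pending per-cpu: %ld\n",
	       committed_space_global, cpu_local);
	return 0;
}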