[PATCH] mm: move_pte to remap ZERO_PAGE
author Nick Piggin <nickpiggin@yahoo.com.au>
Wed, 28 Sep 2005 04:45:18 +0000 (21:45 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Wed, 28 Sep 2005 14:46:40 +0000 (07:46 -0700)
Move the ZERO_PAGE remapping complexity into the move_pte macro in
asm-generic, and have it conditionally depend on
__HAVE_ARCH_MULTIPLE_ZERO_PAGE, which is defined for MIPS.

For architectures without __HAVE_ARCH_MULTIPLE_ZERO_PAGE, move_pte becomes
a noop.
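
For context: on MIPS, ZERO_PAGE(vaddr) selects one of several zeroed pages
by the cache colour of the virtual address, to avoid cache aliasing, which
is why the backing page can legitimately change across an mremap.  A
minimal userspace sketch of that selection, assuming a colour count of 4
and a hypothetical zero_page() helper standing in for the real ZERO_PAGE()
macro shown in the MIPS hunk below:

#include <stdio.h>

/* Hypothetical stand-in for the MIPS scheme: empty_zero_page is a block
 * of NCOLOURS zeroed pages, and zero_page_mask picks the page whose
 * cache colour matches the mapping's virtual address. */
#define PAGE_SHIFT 12
#define NCOLOURS   4    /* assumed colour count, purely illustrative */
static char empty_zero_page[NCOLOURS << PAGE_SHIFT];
static unsigned long zero_page_mask = (unsigned long)(NCOLOURS - 1) << PAGE_SHIFT;

/* Simplified ZERO_PAGE(): returns the colour-matched zero page. */
static void *zero_page(unsigned long vaddr)
{
	return empty_zero_page + (vaddr & zero_page_mask);
}

int main(void)
{
	/* Addresses with different colours get different zero pages, so
	 * after mremap the pte must be re-derived for the new address. */
	printf("%p %p\n", zero_page(0x1000), zero_page(0x3000));
	return 0;
}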

From: Hugh Dickins <hugh@veritas.com>

Fix a nasty little bug we missed in Nick's mremap move ZERO_PAGE patch.
The "pte" at that point may be a swap entry or a pte_file entry: we must
check pte_present before potentially corrupting such an entry.
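
To see concretely why the pte_present() guard matters: in a non-present
pte, the bits that pte_pfn() would read do not name a page frame at all;
they encode a swap type/offset (or a file offset for pte_file entries), so
comparing such a pte against ZERO_PAGE and rewriting it with mk_pte would
destroy that information.  A toy userspace sketch, with a bit layout
invented purely for illustration (real pte encodings are per-arch):

#include <stdint.h>
#include <stdio.h>

/* Toy pte: bit 0 = present; the remaining bits hold a pfn when present,
 * or a swap type/offset when not (layout invented for illustration). */
typedef uint64_t pte_t;
#define PTE_PRESENT 0x1ULL

static int pte_present(pte_t pte)  { return pte & PTE_PRESENT; }
static uint64_t pte_pfn(pte_t pte) { return pte >> 1; }

int main(void)
{
	pte_t swap_pte = 0xABCDULL << 1;	/* non-present: a swap entry */

	/* Without the pte_present() check, the swap offset 0xABCD would be
	 * misread as a pfn, and a ZERO_PAGE rewrite would clobber it. */
	if (!pte_present(swap_pte))
		printf("swap entry bits, not a pfn: %#llx\n",
		       (unsigned long long)pte_pfn(swap_pte));
	return 0;
}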

The patch below is against 2.6.14-rc2-mm1, but the same bug is in
2.6.14-rc2's mm/mremap.c, and is more dangerous there since it affects all
arches.  I think the safest course is to send Nick's patch, Yoichi's build
fix, and this fix (build tested) on to Linus, so that only MIPS can be
affected.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-generic/pgtable.h
include/asm-mips/pgtable.h
mm/mremap.c

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f86c1e549466a0809c02fecc750cb5627ed08027..ff28c8b31f5844694de47bf5a8775c55b37d569c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,6 +158,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)      do { } while (0)
 #endif
 
+#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define move_pte(pte, prot, old_addr, new_addr)        (pte)
+#else
+#define move_pte(pte, prot, old_addr, new_addr)                                \
+({                                                                     \
+       pte_t newpte = (pte);                                           \
+       if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&              \
+                       pte_page(pte) == ZERO_PAGE(old_addr))           \
+               newpte = mk_pte(ZERO_PAGE(new_addr), (prot));           \
+       newpte;                                                         \
+})
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index cbd1672c94cb366db294898d470704c2707ac49d..eaf5d9b3a0e18d45d495d706a0dd5ad56bb3a3c6 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -68,6 +68,8 @@ extern unsigned long zero_page_mask;
 #define ZERO_PAGE(vaddr) \
        (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
+#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+
 extern void paging_init(void);
 
 /*
diff --git a/mm/mremap.c b/mm/mremap.c
index a32fed454bd7c60e15da0c720b1631b55a3f8835..f343fc73a8bdd7f0ecd68fb1674438033c85cc49 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,10 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
                        if (dst) {
                                pte_t pte;
                                pte = ptep_clear_flush(vma, old_addr, src);
+
                                /* ZERO_PAGE can be dependent on virtual addr */
-                               if (pfn_valid(pte_pfn(pte)) &&
-                                       pte_page(pte) == ZERO_PAGE(old_addr))
-                                       pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot));
+                               pte = move_pte(pte, new_vma->vm_page_prot,
+                                                       old_addr, new_addr);
                                set_pte_at(mm, new_addr, dst, pte);
                        } else
                                error = -ENOMEM;