[PATCH] i386: PARAVIRT: Allow paravirt backend to choose kernel PMD sharing
author Jeremy Fitzhardinge <jeremy@goop.org>
Wed, 2 May 2007 17:27:13 +0000 (19:27 +0200)
committer Andi Kleen <andi@basil.nowhere.org>
Wed, 2 May 2007 17:27:13 +0000 (19:27 +0200)
Normally when running in PAE mode, the 4th PMD maps the kernel address space,
which can be shared among all processes (since they all need the same kernel
mappings).
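
To make the split concrete, here is a small stand-alone sketch (not part of the
patch; a PAGE_OFFSET of 0xC0000000, the default 3G/1G split, is assumed) showing
why only the fourth pgd slot covers kernel addresses under PAE:

#include <stdio.h>

#define PAE_PGDIR_SHIFT 30              /* each PAE pgd entry maps 1GB */
#define PAGE_OFFSET     0xC0000000UL    /* assumed default 3G/1G split */

int main(void)
{
        /* 0xC0000000 >> 30 == 3: kernel mappings live in the last of the
           four PAE pgd slots, which is why that single pmd can be shared. */
        printf("kernel mappings start at pgd slot %lu of 4\n",
               PAGE_OFFSET >> PAE_PGDIR_SHIFT);
        return 0;
}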

Xen, however, does not allow guests to have the kernel pmd shared between page
tables, so parameterize pgtable.c to allow both modes of operation.
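
Concretely, the choice becomes a per-backend flag: the new shared_kernel_pmd
field in struct paravirt_ops (added to paravirt.h below).  A hypothetical
non-sharing backend would be wired up roughly as follows; only the
shared_kernel_pmd field comes from this patch, while the identifier
xen_paravirt_ops and the other field values are illustrative:

/* Sketch only: a backend that cannot share the kernel pmd clears the flag. */
struct paravirt_ops xen_paravirt_ops = {
        .name = "Xen",
        .paravirt_enabled = 1,
        .kernel_rpl = 1,
        .shared_kernel_pmd = 0, /* kernel pmd is per-pagetable, not shared */
        /* ... remaining operations filled in by the backend ... */
};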

There are several side-effects of this.  One is that vmalloc will update the
kernel address space mappings, and those updates need to be propagated into
all processes if the kernel mappings are not intrinsically shared.  In the
non-PAE case, this is done by maintaining a pgd_list of all processes; this
list is used when all process pagetables must be updated.  pgd_list is
threaded via otherwise unused entries in the page structure for the pgd, which
means that the pgd must be page-sized for this to work.
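
The threading scheme amounts to an intrusive doubly-linked list kept in the
per-page metadata; a simplified user-space model of the idea (illustrative
only - the kernel version in pgtable.c threads the list through struct page
fields via page_private()) looks like this:

/* Simplified model: the links live in the single metadata slot each page
   has, so there can be at most one pgd per page - hence page-sized pgds. */
struct fake_page {
        struct fake_page *next;         /* next pgd page on the list */
        struct fake_page **pprev;       /* address of the pointer to us */
};

static struct fake_page *pgd_list;

void model_pgd_list_add(struct fake_page *pg)
{
        pg->next = pgd_list;
        pg->pprev = &pgd_list;
        if (pg->next)
                pg->next->pprev = &pg->next;
        pgd_list = pg;
}

void model_pgd_list_del(struct fake_page *pg)
{
        *pg->pprev = pg->next;
        if (pg->next)
                pg->next->pprev = pg->pprev;
}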

Normally the PAE pgd is only four 64-bit entries (32 bytes) in size, but Xen
requires the PAE pgd to be page-aligned anyway, so this patch forces the pgd to
be page-aligned and page-sized when the kernel pmd is unshared, to accommodate
both requirements.

Also, since there may be several distinct kernel pmds (if the user/kernel
split is below 3G), there's no point in allocating them from a slab cache;
they're just allocated with get_free_page and initialized appropriately.  (Of
course they could be cached if there is just a single kernel pmd - which is the
default with a 3G user/kernel split - but it doesn't seem worthwhile to add
yet another case into this code).

[ Many thanks to wli for review comments. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Christoph Lameter <clameter@sgi.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/i386/kernel/paravirt.c
arch/i386/mm/fault.c
arch/i386/mm/init.c
arch/i386/mm/pageattr.c
arch/i386/mm/pgtable.c
include/asm-i386/paravirt.h
include/asm-i386/pgtable-2level-defs.h
include/asm-i386/pgtable-2level.h
include/asm-i386/pgtable-3level-defs.h
include/asm-i386/pgtable-3level.h
include/asm-i386/pgtable.h

diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 47d075bdfb95cc1a85ae415d99079880db24ccee..2040a831d5b3b23a2c0c81fbcc3b963356e7787e 100644
@@ -132,6 +132,7 @@ struct paravirt_ops paravirt_ops = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
+       .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
 
        .patch = native_patch,
        .banner = default_banner,
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index c6a0a06258e61e3f448723dcf4c2912cbdc6e766..f534c29e80b265fcc03f23f0f220362b1d79979e 100644
@@ -603,7 +603,6 @@ do_sigbus:
        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
-#ifndef CONFIG_X86_PAE
 void vmalloc_sync_all(void)
 {
        /*
@@ -616,6 +615,9 @@ void vmalloc_sync_all(void)
        static unsigned long start = TASK_SIZE;
        unsigned long address;
 
+       if (SHARED_KERNEL_PMD)
+               return;
+
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
        for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
@@ -638,4 +640,3 @@ void vmalloc_sync_all(void)
                        start = address + PGDIR_SIZE;
        }
 }
-#endif
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index e8545dcf06c559991e8a52309ccc3c6d0851339d..dbe16f63a5664d551f5f3d9e59383b7328e5996a 100644
@@ -745,6 +745,8 @@ struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
+       size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
+
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
@@ -754,13 +756,23 @@ void __init pgtable_cache_init(void)
                                        NULL);
                if (!pmd_cache)
                        panic("pgtable_cache_init(): cannot create pmd cache");
+
+               if (!SHARED_KERNEL_PMD) {
+                       /* If we're in PAE mode and have a non-shared
+                          kernel pmd, then the pgd size must be a
+                          page size.  This is because the pgd_list
+                          links through the page structure, so there
+                          can only be one pgd per page for this to
+                          work. */
+                       pgd_size = PAGE_SIZE;
+               }
        }
        pgd_cache = kmem_cache_create("pgd",
-                               PTRS_PER_PGD*sizeof(pgd_t),
-                               PTRS_PER_PGD*sizeof(pgd_t),
+                               pgd_size,
+                               pgd_size,
                                0,
                                pgd_ctor,
-                               PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
+                               (!SHARED_KERNEL_PMD) ? pgd_dtor : NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
 }
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index ea6b6d4a0a2ac7a4f6388f5d4b1784e806fc9740..47bd477c8eccb4e2691f9a21fc3e09e3e7beb839 100644
@@ -91,7 +91,7 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
        unsigned long flags;
 
        set_pte_atomic(kpte, pte);      /* change init_mm */
-       if (PTRS_PER_PMD > 1)
+       if (SHARED_KERNEL_PMD)
                return;
 
        spin_lock_irqsave(&pgd_lock, flags);
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 99c09edc3dbb4bfe720e30ec5f7518f99292aa80..9a96c1647428cf76a0e600b7845732b404dfee98 100644
@@ -232,42 +232,92 @@ static inline void pgd_list_del(pgd_t *pgd)
                set_page_private(next, (unsigned long)pprev);
 }
 
+#if (PTRS_PER_PMD == 1)
+/* Non-PAE pgd constructor */
 void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags;
 
-       if (PTRS_PER_PMD == 1) {
-               memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-               spin_lock_irqsave(&pgd_lock, flags);
-       }
+       /* !PAE, no pagetable sharing */
+       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+
+       spin_lock_irqsave(&pgd_lock, flags);
 
+       /* must happen under lock */
        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
-
-       if (PTRS_PER_PMD > 1)
-               return;
-
-       /* must happen under lock */
        paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-                       __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                       USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);
-
+                               __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                               USER_PTRS_PER_PGD,
+                               KERNEL_PGD_PTRS);
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
 }
+#else  /* PTRS_PER_PMD > 1 */
+/* PAE pgd constructor */
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+{
+       /* PAE, kernel PMD may be shared */
+
+       if (SHARED_KERNEL_PMD) {
+               clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+                               swapper_pg_dir + USER_PTRS_PER_PGD,
+                               KERNEL_PGD_PTRS);
+       } else {
+               unsigned long flags;
+
+               memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+               spin_lock_irqsave(&pgd_lock, flags);
+               pgd_list_add(pgd);
+               spin_unlock_irqrestore(&pgd_lock, flags);
+       }
+}
+#endif /* PTRS_PER_PMD */
 
-/* never called when PTRS_PER_PMD > 1 */
 void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
 
+       BUG_ON(SHARED_KERNEL_PMD);
+
        paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+#define UNSHARED_PTRS_PER_PGD                          \
+       (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+
+/* If we allocate a pmd for part of the kernel address space, then
+   make sure its initialized with the appropriate kernel mappings.
+   Otherwise use a cached zeroed pmd.  */
+static pmd_t *pmd_cache_alloc(int idx)
+{
+       pmd_t *pmd;
+
+       if (idx >= USER_PTRS_PER_PGD) {
+               pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
+
+               if (pmd)
+                       memcpy(pmd,
+                              (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
+                              sizeof(pmd_t) * PTRS_PER_PMD);
+       } else
+               pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+
+       return pmd;
+}
+
+static void pmd_cache_free(pmd_t *pmd, int idx)
+{
+       if (idx >= USER_PTRS_PER_PGD)
+               free_page((unsigned long)pmd);
+       else
+               kmem_cache_free(pmd_cache, pmd);
+}
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        int i;
@@ -276,10 +326,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;
 
-       for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+       for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
+               pmd_t *pmd = pmd_cache_alloc(i);
+
                if (!pmd)
                        goto out_oom;
+
                paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
@@ -290,7 +342,7 @@ out_oom:
                pgd_t pgdent = pgd[i];
                void* pmd = (void *)__va(pgd_val(pgdent)-1);
                paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-               kmem_cache_free(pmd_cache, pmd);
+               pmd_cache_free(pmd, i);
        }
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
@@ -302,11 +354,11 @@ void pgd_free(pgd_t *pgd)
 
        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
-               for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+               for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                        pgd_t pgdent = pgd[i];
                        void* pmd = (void *)__va(pgd_val(pgdent)-1);
                        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-                       kmem_cache_free(pmd_cache, pmd);
+                       pmd_cache_free(pmd, i);
                }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index c49b44cdd8ee222ca9db46587ae3d27ce58551ea..f93599dc7756d1b1237ccf35cd31f1339267acf2 100644
@@ -35,6 +35,7 @@ struct desc_struct;
 struct paravirt_ops
 {
        unsigned int kernel_rpl;
+       int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
 
diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h
index 02518079f816962d59c00ea22812c920f16a954b..0f71c9f13da4a406e8f6e6e7d3225fe30e72c983 100644
@@ -1,6 +1,8 @@
 #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
 #define _I386_PGTABLE_2LEVEL_DEFS_H
 
+#define SHARED_KERNEL_PMD      0
+
 /*
  * traditional i386 two-level paging structure:
  */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 043a2bcfa86a11e0a1ac19df6c39f465a1d630c5..781fe4bcc96292292834e7c855df5141bc4687dc 100644
@@ -82,6 +82,4 @@ static inline int pte_exec_kernel(pte_t pte)
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
 
-void vmalloc_sync_all(void);
-
 #endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h
index eb3a1ea88671a1f2bc96600d755e7e9c9c9cfb8a..c0df89f66e8b0a49e81f18e93facf70d63de365b 100644
@@ -1,6 +1,12 @@
 #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
 #define _I386_PGTABLE_3LEVEL_DEFS_H
 
+#ifdef CONFIG_PARAVIRT
+#define SHARED_KERNEL_PMD      (paravirt_ops.shared_kernel_pmd)
+#else
+#define SHARED_KERNEL_PMD      1
+#endif
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index be6017f37a9163ca75b0614ede2a5f2a496f710e..664bfee5a2f20f2f1b63a31413c273cbf26b5e35 100644
@@ -200,6 +200,4 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 
 #define __pmd_free_tlb(tlb, x)         do { } while (0)
 
-#define vmalloc_sync_all() ((void)0)
-
 #endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 0790ad6ed4404e499bd831e749b9ece0be3bcb3f..5b88a6a1278ed2f01961bd4c7b8b52daa5a078c3 100644
@@ -243,6 +243,8 @@ static inline pte_t pte_mkyoung(pte_t pte)  { (pte).pte_low |= _PAGE_ACCESSED; re
 static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte)      { (pte).pte_low |= _PAGE_PSE; return pte; }
 
+extern void vmalloc_sync_all(void);
+
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else