xen: efficiently support a holey p2m table
author Jeremy Fitzhardinge <jeremy@goop.org>
Mon, 26 May 2008 22:31:20 +0000 (23:31 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 27 May 2008 08:11:37 +0000 (10:11 +0200)
When using sparsemem and memory hotplug, the kernel's pseudo-physical
address space can be discontiguous.  Previously this was dealt with by
having the upper parts of the radix tree stubbed off.  Unfortunately,
this is incompatible with save/restore, which requires a complete p2m
table.

The solution is to have a special distinguished all-invalid p2m leaf
page, which we can point all the hole areas at.  This allows the tools
to see a complete p2m table, but it only costs a page for all memory
holes.

It also simplifies the code since it removes a few special cases.
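
To illustrate the pattern outside the kernel: every top-level slot that has no backing leaf is pointed at one shared, all-invalid leaf page, so a read through a hole just returns the invalid marker, and the first real write swaps in a private leaf. The sketch below is a simplified userspace model of that idea, not the patched code itself (which follows in the diff); ENTRIES_PER_LEAF, TOP_ENTRIES, lookup() and store() are made-up stand-ins for the kernel's P2M constants and get/set_phys_to_machine().

/*
 * Userspace sketch of the "shared all-invalid leaf" idea.  Sizes and
 * names are illustrative only; the real code is in arch/x86/xen/mmu.c.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_LEAF	512
#define TOP_ENTRIES		64
#define INVALID_ENTRY		(~0UL)

/* One statically allocated leaf in which every entry is invalid. */
static unsigned long missing_leaf[ENTRIES_PER_LEAF] =
	{ [0 ... ENTRIES_PER_LEAF - 1] = INVALID_ENTRY };

/*
 * Every top-level slot starts out pointing at the shared leaf, so the
 * table looks fully populated while holes cost only that single page.
 */
static unsigned long *top[TOP_ENTRIES] =
	{ [0 ... TOP_ENTRIES - 1] = missing_leaf };

static unsigned long lookup(unsigned long pfn)
{
	/* No NULL check needed: holes resolve through missing_leaf. */
	return top[pfn / ENTRIES_PER_LEAF][pfn % ENTRIES_PER_LEAF];
}

static void store(unsigned long pfn, unsigned long mfn)
{
	unsigned long **slot = &top[pfn / ENTRIES_PER_LEAF];
	int i;

	if (*slot == missing_leaf) {
		/* Writing "invalid" into a hole changes nothing. */
		if (mfn == INVALID_ENTRY)
			return;

		/* First real entry in this region: install a private leaf. */
		unsigned long *leaf = malloc(sizeof(missing_leaf));

		for (i = 0; i < ENTRIES_PER_LEAF; i++)
			leaf[i] = INVALID_ENTRY;
		*slot = leaf;
	}
	(*slot)[pfn % ENTRIES_PER_LEAF] = mfn;
}

int main(void)
{
	printf("hole:   %#lx\n", lookup(1000));	/* all-ones invalid marker */
	store(1000, 0x1234);
	printf("mapped: %#lx\n", lookup(1000));	/* 0x1234 */
	return 0;
}

Tools that walk the whole table for save/restore therefore never see a NULL pointer; every pfn in a hole simply reads back as invalid.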

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/xen/mmu.c

index 644232aa7bfb69535ffcf446b42f5e08cb39bab8..da7b45b05066569cb440018843e47af0084bd371 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
 #include "mmu.h"
 
 #define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
+#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
 
-static unsigned long *p2m_top[MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE];
+/* Placeholder for holes in the address space */
+static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
+       __attribute__((section(".data.page_aligned"))) =
+               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+
+ /* Array of pointers to pages containing p2m entries */
+static unsigned long *p2m_top[TOP_ENTRIES]
+       __attribute__((section(".data.page_aligned"))) =
+               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
@@ -92,9 +101,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == NULL)
-               return INVALID_P2M_ENTRY;
-
        idx = p2m_index(pfn);
        return p2m_top[topidx][idx];
 }
@@ -110,7 +116,7 @@ static void alloc_p2m(unsigned long **pp)
        for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
                p[i] = INVALID_P2M_ENTRY;
 
-       if (cmpxchg(pp, NULL, p) != NULL)
+       if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
                free_page((unsigned long)p);
 }
 
@@ -129,7 +135,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        }
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == NULL) {
+       if (p2m_top[topidx] == p2m_missing) {
                /* no need to allocate a page to store an invalid entry */
                if (mfn == INVALID_P2M_ENTRY)
                        return;
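
The one subtle spot is the lazy allocation in alloc_p2m(): with the placeholder installed everywhere, the race between CPUs populating the same region is resolved by cmpxchg()ing against p2m_missing instead of NULL, and whoever loses the race frees its freshly cleared page. Below is a rough userspace model of that step, using C11 atomics and malloc()/free() in place of the kernel's cmpxchg() and page allocator; the names are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_LEAF	512
#define INVALID_ENTRY		(~0UL)

static unsigned long missing_leaf[ENTRIES_PER_LEAF];	/* shared placeholder */
static _Atomic(unsigned long *) slot = missing_leaf;	/* one top-level entry */

static void fill_slot(void)
{
	unsigned long *expected = missing_leaf;
	unsigned long *p = malloc(ENTRIES_PER_LEAF * sizeof(*p));
	int i;

	for (i = 0; i < ENTRIES_PER_LEAF; i++)
		p[i] = INVALID_ENTRY;

	/*
	 * Install p only if the slot still holds the placeholder; if some
	 * other caller got there first, discard our page -- the same
	 * pattern as cmpxchg(pp, p2m_missing, p) followed by free_page().
	 */
	if (!atomic_compare_exchange_strong(&slot, &expected, p))
		free(p);
}

int main(void)
{
	fill_slot();		/* installs a private leaf */
	fill_slot();		/* "loses the race" and frees its copy */
	printf("placeholder replaced: %s\n",
	       atomic_load(&slot) != missing_leaf ? "yes" : "no");
	return 0;
}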