[PATCH] memory hotplug: i386 addition functions
author     Dave Hansen <haveblue@us.ibm.com>
           Sun, 30 Oct 2005 01:16:57 +0000 (18:16 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Sun, 30 Oct 2005 04:40:45 +0000 (21:40 -0700)
Adds the functions necessary for non-NUMA hot-add of highmem to an
existing zone on i386.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
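
For orientation before the diff: a minimal sketch, assuming a hypothetical
platform driver, of how the add_memory() entry point introduced below could
be exercised. Only add_memory() and online_page() come from this patch; the
function name example_probe_new_ram() and its error handling are
illustrative.

#include <linux/kernel.h>
#include <linux/memory_hotplug.h>

/*
 * Hypothetical caller (not part of this patch): hand a newly
 * discovered physical memory range to the arch code.
 */
static int example_probe_new_ram(u64 phys_start, u64 length)
{
        int ret;

        /* Register struct pages and zone bookkeeping for the range. */
        ret = add_memory(phys_start, length);
        if (ret)
                return ret;

        /*
         * Onlining is a separate step; when it happens, the generic
         * hotplug code is expected to call the arch's online_page()
         * on each page, which on i386 frees the page into HIGHMEM and
         * updates totalram_pages, totalhigh_pages and num_physpages.
         */
        return 0;
}
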
arch/i386/mm/discontig.c
arch/i386/mm/init.c

diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 244d8ec66be2a473361bf00d81e81ba53cdf9fe0..c4af9638dbfa133adacfabeb7b112167cbe63baa 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -98,7 +98,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
-extern void one_highpage_init(struct page *, int, int);
+extern void add_one_highpage_init(struct page *, int, int);
 
 extern struct e820map e820;
 extern unsigned long init_pg_tables_end;
@@ -427,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro)
                        if (!pfn_valid(node_pfn))
                                continue;
                        page = pfn_to_page(node_pfn);
-                       one_highpage_init(page, node_pfn, bad_ppro);
+                       add_one_highpage_init(page, node_pfn, bad_ppro);
                }
        }
        totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 2ebaf75f732e92e8e901ea408e490718794b8821..542d9298da5e91571bf75f45152e8901d473a684 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -266,17 +267,46 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
        pkmap_page_table = pte; 
 }
 
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+void __devinit free_new_highpage(struct page *page)
+{
+       set_page_count(page, 1);
+       __free_page(page);
+       totalhigh_pages++;
+}
+
+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
-               set_page_count(page, 1);
-               __free_page(page);
-               totalhigh_pages++;
+               free_new_highpage(page);
        } else
                SetPageReserved(page);
 }
 
+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+{
+       free_new_highpage(page);
+       totalram_pages++;
+#ifdef CONFIG_FLATMEM
+       max_mapnr = max(pfn, max_mapnr);
+#endif
+       num_physpages++;
+       return 0;
+}
+
+/*
+ * Not currently handling the NUMA case.
+ * Assume a single node, and that all memory
+ * added dynamically which would be onlined
+ * here is in HIGHMEM.
+ */
+void online_page(struct page *page)
+{
+       ClearPageReserved(page);
+       add_one_highpage_hotplug(page, page_to_pfn(page));
+}
+
+
 #ifdef CONFIG_NUMA
 extern void set_highmem_pages_init(int);
 #else
@@ -284,7 +314,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 {
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-               one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+               add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
 }
 #endif /* CONFIG_FLATMEM */
@@ -615,6 +645,28 @@ void __init mem_init(void)
 #endif
 }
 
+/*
+ * This is for the non-NUMA, single-node SMP case.
+ * Specifically, on x86 we always add hot-added
+ * memory to highmem for now.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+int add_memory(u64 start, u64 size)
+{
+       struct pglist_data *pgdata = &contig_page_data;
+       struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+
+       return __add_pages(zone, start_pfn, nr_pages);
+}
+
+int remove_memory(u64 start, u64 size)
+{
+       return -EINVAL;
+}
+#endif
+
 kmem_cache_t *pgd_cache;
 kmem_cache_t *pmd_cache;
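
A closing note on the non-NUMA add_memory() above: pgdata->node_zones +
MAX_NR_ZONES-1 selects the last zone of the single node, which in this
era's i386 zone layout is ZONE_HIGHMEM, matching the comment that
hot-added memory is always placed in highmem for now. For illustration
only, here is a simplified, hedged rendition of the consuming side that
walks a hot-added range and hands each page to online_page(); the real
loop lives in the generic hotplug code (mm/memory_hotplug.c) from the
same series, and example_online_range() is a hypothetical name.

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

/*
 * Illustrative only: walk a hot-added pfn range and online each page
 * via the arch-provided online_page() defined in this patch.
 */
static void example_online_range(unsigned long start_pfn,
                                 unsigned long nr_pages)
{
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pfn_to_page(start_pfn + i);

                /*
                 * On i386 this frees the page into HIGHMEM and
                 * updates the RAM accounting counters.
                 */
                online_page(page);
        }
}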