x86, mm: use add_highpages_with_active_regions() for high pages init v2
author    Yinghai Lu <yhlu.kernel@gmail.com>    Sun, 15 Jun 2008 01:32:52 +0000 (18:32 -0700)
committer Ingo Molnar <mingo@elte.hu>           Tue, 8 Jul 2008 08:37:25 +0000 (10:37 +0200)
Use early_node_map to init the high pages, so we can remove the
page_is_ram() and page_is_reserved_early() checks from the big loop
around add_one_highpage_init().

Also remove page_is_reserved_early(); it is no longer needed.

v2: fix the build on other platforms.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
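
The pattern the commit introduces is an iterate-by-range callback:
instead of testing every pfn individually, walk the known-active
ranges and hand each one to a worker. A minimal userspace model of
that pattern (hypothetical, simplified types and made-up example
data; the real kernel version lives in mm/page_alloc.c and walks
early_node_map[]):

#include <stdio.h>

/* Hypothetical, simplified model of an early_node_map entry. */
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static struct node_active_region early_node_map[] = {
	{ 0x100, 0x200, 0 },	/* made-up example ranges */
	{ 0x300, 0x380, 0 },
};

typedef void (*work_fn_t)(unsigned long, unsigned long, void *);

/* Model of work_with_active_regions(): invoke work_fn once per
 * active range on the given node instead of probing every pfn. */
static void work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
	size_t i;

	for (i = 0; i < sizeof(early_node_map) / sizeof(early_node_map[0]); i++)
		if (early_node_map[i].nid == nid)
			work_fn(early_node_map[i].start_pfn,
				early_node_map[i].end_pfn, data);
}

static void print_range(unsigned long start_pfn, unsigned long end_pfn,
			void *data)
{
	printf("node %d: pfns %#lx..%#lx\n", *(int *)data, start_pfn, end_pfn);
}

int main(void)
{
	int nid = 0;

	work_with_active_regions(nid, print_range, &nid);
	return 0;
}

Because the active ranges already exclude holes and early-reserved
memory, the per-pfn page_is_ram() and page_is_reserved_early()
checks become unnecessary, which is what the diffs below remove.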
arch/x86/kernel/e820.c
arch/x86/mm/discontig_32.c
arch/x86/mm/init_32.c
include/asm-x86/e820.h
include/asm-x86/highmem.h
include/linux/mm.h
mm/page_alloc.c

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 5051ce744b4e93b2cefcfd4c82b477d568feace3..ed46b7a6bc1375e766b62f1fc45630d48a089286 100644
@@ -612,17 +612,6 @@ void __init free_early(u64 start, u64 end)
        early_res[j - 1].end = 0;
 }
 
-int __init page_is_reserved_early(unsigned long pagenr)
-{
-       u64 start = (u64)pagenr << PAGE_SHIFT;
-       int i;
-       struct early_res *r;
-
-       i = find_overlapped_early(start, start + PAGE_SIZE);
-       r = &early_res[i];
-       return (i < MAX_EARLY_RES && r->end);
-}
-
 void __init early_res_to_bootmem(u64 start, u64 end)
 {
        int i;
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index c3f119e99e0def59f43c7767152ea2562dbdcf92..7c4d0255f8d8b1d1fcc94a510e5b1d0b2da46af4 100644
@@ -100,7 +100,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 #endif
 
 extern unsigned long find_max_low_pfn(void);
-extern void add_one_highpage_init(struct page *, int, int);
 extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
@@ -432,10 +431,10 @@ void __init set_highmem_pages_init(int bad_ppro)
 {
 #ifdef CONFIG_HIGHMEM
        struct zone *zone;
-       struct page *page;
+       int nid;
 
        for_each_zone(zone) {
-               unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+               unsigned long zone_start_pfn, zone_end_pfn;
 
                if (!is_highmem(zone))
                        continue;
@@ -443,16 +442,12 @@ void __init set_highmem_pages_init(int bad_ppro)
                zone_start_pfn = zone->zone_start_pfn;
                zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
+               nid = zone_to_nid(zone);
                printk("Initializing %s for node %d (%08lx:%08lx)\n",
-                               zone->name, zone_to_nid(zone),
-                               zone_start_pfn, zone_end_pfn);
-
-               for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
-                       if (!pfn_valid(node_pfn))
-                               continue;
-                       page = pfn_to_page(node_pfn);
-                       add_one_highpage_init(page, node_pfn, bad_ppro);
-               }
+                               zone->name, nid, zone_start_pfn, zone_end_pfn);
+
+               add_highpages_with_active_regions(nid, zone_start_pfn,
+                                zone_end_pfn, bad_ppro);
        }
        totalram_pages += totalhigh_pages;
 #endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index abadb1da70df63f574594a39f28e51cb794df301..ba07a489230e7fe1a02423b58423b0fcc1b9a24a 100644
@@ -287,10 +287,10 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
        pkmap_page_table = pte;
 }
 
-void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init
+add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
-       if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn)) &&
-           !page_is_reserved_early(pfn)) {
+       if (!(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
@@ -299,18 +299,58 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
                SetPageReserved(page);
 }
 
+struct add_highpages_data {
+       unsigned long start_pfn;
+       unsigned long end_pfn;
+       int bad_ppro;
+};
+
+static void __init add_highpages_work_fn(unsigned long start_pfn,
+                                        unsigned long end_pfn, void *datax)
+{
+       int node_pfn;
+       struct page *page;
+       unsigned long final_start_pfn, final_end_pfn;
+       struct add_highpages_data *data;
+       int bad_ppro;
+
+       data = (struct add_highpages_data *)datax;
+       bad_ppro = data->bad_ppro;
+
+       final_start_pfn = max(start_pfn, data->start_pfn);
+       final_end_pfn = min(end_pfn, data->end_pfn);
+       if (final_start_pfn >= final_end_pfn)
+               return;
+
+       for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
+            node_pfn++) {
+               if (!pfn_valid(node_pfn))
+                       continue;
+               page = pfn_to_page(node_pfn);
+               add_one_highpage_init(page, node_pfn, bad_ppro);
+       }
+
+}
+
+void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+                                             unsigned long end_pfn,
+                                             int bad_ppro)
+{
+       struct add_highpages_data data;
+
+       data.start_pfn = start_pfn;
+       data.end_pfn = end_pfn;
+       data.bad_ppro = bad_ppro;
+
+       work_with_active_regions(nid, add_highpages_work_fn, &data);
+}
+
 #ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
-       int pfn;
+       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
+                                               bad_ppro);
 
-       for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
-               /*
-                * Holes under sparsemem might not have no mem_map[]:
-                */
-               if (pfn_valid(pfn))
-                       add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-       }
        totalram_pages += totalhigh_pages;
 }
 #endif /* !CONFIG_NUMA */
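
Note how add_highpages_work_fn() above clamps each active range to
the requested window with max()/min() and skips empty intersections,
so only pfns that are both active and inside the highmem window are
initialized. A standalone sketch of just that clamping step, with
made-up example values:

#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Made-up example: highmem window and one active region. */
	unsigned long win_start = 0x38000, win_end = 0x40000;
	unsigned long reg_start = 0x30000, reg_end = 0x3c000;

	/* Same clamp as add_highpages_work_fn(). */
	unsigned long final_start_pfn = max_ul(reg_start, win_start);
	unsigned long final_end_pfn = min_ul(reg_end, win_end);

	if (final_start_pfn >= final_end_pfn)
		printf("no overlap, range skipped\n");
	else
		printf("init pfns %#lx..%#lx\n", final_start_pfn, final_end_pfn);
	return 0;
}

Here the region 0x30000..0x3c000 is clamped to 0x38000..0x3c000;
a region entirely outside the window would be skipped.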
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 6b0ce745a60ce428add1523e9e72da0b0b6e8d36..55d3105969079632ca454046780a203d14dc8a99 100644
@@ -86,7 +86,6 @@ extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
 extern void reserve_early(u64 start, u64 end, char *name);
 extern void free_early(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
-extern int page_is_reserved_early(unsigned long pagenr);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
 
 extern unsigned long e820_end_of_ram(void);
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index e153f3b447747d83db0bc54c4f0830266686b85d..85c4fea41ff690fbda48df006130ede5a1b06374 100644
@@ -74,6 +74,9 @@ struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()    do { } while (0)
 
+extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+                                       unsigned long end_pfn, int bad_ppro);
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 034a3156d2f05e749185a45e7b3715fb64c0fe9a..e4de460907c14fc6129daf6dbfaa1d2d91b6770c 100644
@@ -1011,6 +1011,8 @@ extern unsigned long find_min_pfn_with_active_regions(void);
 extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
                                                unsigned long max_low_pfn);
+typedef void (*work_fn_t)(unsigned long, unsigned long, void *);
+extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 extern int early_pfn_to_nid(unsigned long pfn);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d80e1868e570e5e204812c91025f0542c5f91b3d..41c6e3aa059f885ee2f36b59314a4e750e620b81 100644
@@ -2929,6 +2929,14 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+       int i;
+
+       for_each_active_range_index_in_nid(i, nid)
+               work_fn(early_node_map[i].start_pfn, early_node_map[i].end_pfn,
+                       data);
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
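
work_with_active_regions() takes an arbitrary work_fn_t, so other
early-boot code can reuse it with its own callback. A hypothetical
kernel-context sketch (not part of this commit) that counts the pfns
spanned by a node's active regions:

#include <linux/init.h>
#include <linux/mm.h>

/* Accumulate the size of each active range into *data. */
static void __init count_pfns_work_fn(unsigned long start_pfn,
				      unsigned long end_pfn, void *data)
{
	*(unsigned long *)data += end_pfn - start_pfn;
}

static unsigned long __init count_active_pfns(int nid)
{
	unsigned long total = 0;

	work_with_active_regions(nid, count_pfns_work_fn, &total);
	return total;
}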