pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
x86: fix 1:1 mapping init on 64-bit (memory hotplug case)
author Jan Beulich <jbeulich@novell.com>
Thu, 21 Aug 2008 13:27:22 +0000 (14:27 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 22 Aug 2008 05:51:53 +0000 (07:51 +0200)
While I don't have a hotplug-capable system at hand, I think two issues need
fixing:

- pud_phys (in kernel_physical_mapping_init()) would remain uninitialized in
  the after_bootmem case (illustrated by the first sketch below)

- the locking done just around phys_pmd_{init,update}() would leave out pgd
  updates and needlessly covered code portions that do allocations (perhaps
  using a more friendly gfp value in alloc_low_page() would then become
  possible); the second sketch below shows the narrower locking pattern the
  patch adopts

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
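
To make the first issue concrete, here is a minimal user-space sketch of the
hazard, using hypothetical names (fake_alloc(), populate()) rather than kernel
code: a physical address written only on one branch but consumed on both,
which the unconditional alloc_low_page() call in the patched
kernel_physical_mapping_init() avoids.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for alloc_low_page(): always writes back a physical address. */
static void fake_alloc(unsigned long *phys)
{
	*phys = 0x1000;		/* pretend physical address of a new table */
}

/* Stand-in for pgd_populate(): consumes that physical address. */
static void populate(unsigned long phys)
{
	printf("populate with %#lx\n", phys);
}

int main(void)
{
	bool after_bootmem = true;	/* the memory-hotplug case */
	unsigned long phys;

	/*
	 * Pre-patch shape: phys was written only on the !after_bootmem
	 * path, yet populate(phys) ran on both paths, so the hotplug
	 * case consumed an uninitialized value.
	 *
	 * Post-patch shape: allocate unconditionally, so phys is always
	 * initialized before it is consumed.
	 */
	(void)after_bootmem;
	fake_alloc(&phys);
	populate(phys);
	return 0;
}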
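
The second issue is one of locking scope: the patch arranges for
init_mm.page_table_lock to be held only around the operations that modify
live page tables (pmd_populate_kernel(), pud_populate(), pgd_populate(),
phys_pte_update(), and the large-page set_pte() calls), never across the
allocations. A minimal user-space analogy with a pthread mutex, using
hypothetical names (alloc_table(), init_table(), map_one_entry(),
table_lock), looks like this:

#include <pthread.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512

struct table {
	struct table *entry[ENTRIES_PER_TABLE];
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for alloc_low_page(): may block, so it runs unlocked. */
static struct table *alloc_table(void)
{
	return calloc(1, sizeof(struct table));
}

/* Fill a table nobody else can see yet: no locking required. */
static void init_table(struct table *t)
{
	(void)t;	/* ... populate the still-private table ... */
}

static void map_one_entry(struct table *parent, unsigned int index)
{
	struct table *child = alloc_table();	/* outside the lock */

	if (!child)
		return;
	init_table(child);			/* still private, unlocked */

	pthread_mutex_lock(&table_lock);	/* lock only to publish */
	parent->entry[index] = child;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct table *root = alloc_table();

	if (root) {
		map_one_entry(root, 0);
		free(root->entry[0]);
		free(root);
	}
	return 0;
}

The analogy deliberately omits details of the kernel path, such as the
__flush_tlb_all() calls that stay outside the lock; it only illustrates the
allocate-unlocked, publish-under-the-lock ordering the patch establishes.
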
arch/x86/mm/init_64.c

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a87ea0e4b3dca3e7149aa03321af508cc1216b0d..8f487705c3e630020d5e91e4ea90b33c4a899194 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                }
 
                if (pmd_val(*pmd)) {
-                       if (!pmd_large(*pmd))
+                       if (!pmd_large(*pmd)) {
+                               spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
-                                                                end);
+                                                               end);
+                               spin_unlock(&init_mm.page_table_lock);
+                       }
                        /* Count entries we're using from level2_ident_pgt */
                        if (start == 0)
                                pages++;
@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
+                       spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                       spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }
@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                last_map_addr = phys_pte_init(pte, address, end);
                unmap_low_page(pte);
 
+               spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+               spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;
 
-       spin_lock(&init_mm.page_table_lock);
        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-       spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
        return last_map_addr;
 }
@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
+                       spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                       spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }
 
                pmd = alloc_low_page(&pmd_phys);
-
-               spin_lock(&init_mm.page_table_lock);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
                unmap_low_page(pmd);
+
+               spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
-
        }
        __flush_tlb_all();
        update_page_count(PG_LEVEL_1G, pages);
@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                        continue;
                }
 
-               if (after_bootmem)
-                       pud = pud_offset(pgd, start & PGDIR_MASK);
-               else
-                       pud = alloc_low_page(&pud_phys);
-
+               pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);
-               pgd_populate(&init_mm, pgd_offset_k(start),
-                            __va(pud_phys));
+
+               spin_lock(&init_mm.page_table_lock);
+               pgd_populate(&init_mm, pgd, __va(pud_phys));
+               spin_unlock(&init_mm.page_table_lock);
        }
 
        return last_map_addr;