pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
sh: pmd rework.
author Stuart Menefy <stuart.menefy@st.com>
Tue, 21 Nov 2006 06:38:05 +0000 (15:38 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Wed, 6 Dec 2006 01:45:38 +0000 (10:45 +0900)
Remove extra bits from the pmd structure and store a kernel logical
address rather than a physical address. This allows it to be directly
dereferenced. Another piece of weirdness inherited from x86.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
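
For context, a minimal standalone sketch of the before/after pmd encoding (illustration only, not part of the commit): the old entries held a physical address plus _PAGE_TABLE flag bits and needed masking plus a __va()-style translation before use, while the new entries hold the kernel logical address itself, so pmd_page_vaddr() reduces to pmd_val(). The flag value, page offset, and the pte-page address below are hypothetical stand-ins, not the real SH values.

#include <stdio.h>

/* Toy pmd type mirroring the kernel's pmd_t / pmd_val(). */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)		((x).pmd)

#define TOY_PAGE_MASK		(~0xfffUL)
#define TOY_PAGE_OFFSET		0x80000000UL	/* hypothetical phys->virt offset (__va) */
#define TOY_PAGE_TABLE		0x163UL		/* hypothetical _PAGE_TABLE flag bits */

int main(void)
{
	/* Kernel logical address of a pte page (hypothetical). */
	unsigned long pte_page = 0x8c012000UL;

	/* Old scheme: physical address | flag bits; decoding needs
	 * masking and a __va()-style translation. */
	pmd_t old_pmd = { (pte_page - TOY_PAGE_OFFSET) | TOY_PAGE_TABLE };
	unsigned long old_decode = (pmd_val(old_pmd) & TOY_PAGE_MASK) + TOY_PAGE_OFFSET;

	/* New scheme: the entry is the kernel logical address, so
	 * pmd_page_vaddr(pmd) is simply pmd_val(pmd). */
	pmd_t new_pmd = { pte_page };
	unsigned long new_decode = pmd_val(new_pmd);

	printf("old decode: %#lx, new decode: %#lx\n", old_decode, new_decode);
	return 0;
}

Both decodes yield the same pte-page address; the point of the change is that the new form needs no decoding at all, which is what lets pmd_populate_kernel() and pmd_page_vaddr() in the diff below shrink to plain stores and loads.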
arch/sh/mm/fault.c
arch/sh/mm/init.c
include/asm-sh/pgalloc.h
include/asm-sh/pgtable.h

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 43bed2cb00e38c73c0c10a8e78e405477f7c6faa..128907ef7fcd69ca4cd66e5784f91c1aca21a799 100644
@@ -46,6 +46,45 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        mm = tsk->mm;
        si_code = SEGV_MAPERR;
 
+       if (unlikely(address >= TASK_SIZE)) {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int offset = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pud_t *pud, *pud_k;
+               pmd_t *pmd, *pmd_k;
+
+               pgd = get_TTB() + offset;
+               pgd_k = swapper_pg_dir + offset;
+
+               /* This will never happen with the folded page table. */
+               if (!pgd_present(*pgd)) {
+                       if (!pgd_present(*pgd_k))
+                               goto bad_area_nosemaphore;
+                       set_pgd(pgd, *pgd_k);
+                       return;
+               }
+
+               pud = pud_offset(pgd, address);
+               pud_k = pud_offset(pgd_k, address);
+               if (pud_present(*pud) || !pud_present(*pud_k))
+                       goto bad_area_nosemaphore;
+               set_pud(pud, *pud_k);
+
+               pmd = pmd_offset(pud, address);
+               pmd_k = pmd_offset(pud_k, address);
+               if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+                       goto bad_area_nosemaphore;
+               set_pmd(pmd, *pmd_k);
+
+               return;
+       }
+
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
@@ -109,6 +148,7 @@ survive:
 bad_area:
        up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8c8d39118387ec451c1000e851af8cf5d9a1ad7b..462bfeac6d9ced7b79ee161e944a7f0a016f47b5 100644
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
        pmd_t *pmd;
        pte_t *pte;
 
-       pgd = swapper_pg_dir + pgd_index(addr);
+       pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return;
        }
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud)) {
-               pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-               set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
-               if (pmd != pmd_offset(pud, 0)) {
-                       pud_ERROR(*pud);
-                       return;
-               }
+       pud = pud_alloc(NULL, pgd, addr);
+       if (unlikely(!pud)) {
+               pud_ERROR(*pud);
+               return;
        }
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd)) {
-               pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-               set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
-               if (pte != pte_offset_kernel(pmd, 0)) {
-                       pmd_ERROR(*pmd);
-                       return;
-               }
+       pmd = pmd_alloc(NULL, pud, addr);
+       if (unlikely(!pmd)) {
+               pmd_ERROR(*pmd);
+               return;
        }
 
        pte = pte_offset_kernel(pmd, addr);
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index e841465ab4d24214955704852dde0b99a3da35e8..888e4529e6fe3c4bb2108f134b8b7674435836c3 100644
@@ -1,13 +1,16 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#define pmd_populate_kernel(mm, pmd, pte) \
-               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+{
+       set_pmd(pmd, __pmd((unsigned long)pte));
+}
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
 {
-       set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+       set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
 
 /*
@@ -15,7 +18,16 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+
+       if (pgd) {
+               memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+               memcpy(pgd + USER_PTRS_PER_PGD,
+                      swapper_pg_dir + USER_PTRS_PER_PGD,
+                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+       }
+
+       return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 22c3d0b3e11aad1b0e10eacf7924b0a291075bdb..b1f21e765640202e5e3a59617b0d7886145752db 100644
@@ -203,26 +203,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_X2TLB) /* SH-X2 TLB */
-#define _PAGE_TABLE \
-       (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
-        _PAGE_EXT(_PAGE_EXT_USER_READ | _PAGE_EXT_USER_WRITE))
-
-#define _KERNPG_TABLE \
-       (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
-        _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_KERN_WRITE))
-
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_READ | \
-                                          _PAGE_EXT_USER_WRITE))
+                                          _PAGE_EXT_USER_WRITE))
 
 #define PAGE_EXECREAD  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
-                                          _PAGE_EXT_USER_READ))
+                                          _PAGE_EXT_USER_READ))
 
 #define PAGE_COPY      PAGE_EXECREAD
 
@@ -237,14 +229,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define PAGE_RWX       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
-                                          _PAGE_EXT_USER_READ  | \
+                                          _PAGE_EXT_USER_READ  | \
                                           _PAGE_EXT_USER_EXEC))
 
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_NOCACHE \
@@ -252,30 +244,25 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
                                 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
                                 _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_EXEC))
+                                          _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_PCC(slot, type) \
                        __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC) \
                                 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
                                 (type))
 
 #elif defined(CONFIG_MMU) /* SH-X TLB */
-#define _PAGE_TABLE \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 
@@ -390,9 +377,9 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pmd_none(x)    (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x) (pmd_val(x))
 #define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
-#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define        pmd_bad(x)      (pmd_val(x) & ~PAGE_MASK)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)    phys_to_page(pte_val(x)&PTE_PHYS_MASK)
@@ -477,11 +464,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        return pte;
 }
 
-#define pmd_page_vaddr(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pmd_page(pmd) \
-       (phys_to_page(pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)    pmd_val(pmd)
+#define pmd_page(pmd)          (virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))