[ARM] mm 8: define mem_types table L1 bit 4 to be for ARMv6
author    Russell King <rmk@dyn-67.arm.linux.org.uk>
          Sat, 5 May 2007 19:03:35 +0000 (20:03 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
          Sat, 5 May 2007 19:03:35 +0000 (20:03 +0100)
Change the memory types table to define L1 descriptor bit 4 in terms of
the ARMv6 definition: execute never (XN).

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
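
For context: the bit at issue is bit 4 of the first-level (L1) descriptor.
On ARMv6 it is XN (execute never); on earlier cores it is implementation
defined (ARM610 used it as the cache "update-able on write" bit, and other
pre-v6 cores require it set), while Xscale requires it clear. Below is a
minimal sketch of the constants the patch relies on; the values mirror
include/asm-arm/pgtable-hwdef.h around this commit, but treat them as
illustrative rather than authoritative.

/*
 * L1 descriptor bits as used by this patch.  The essential point is
 * that PMD_BIT4 (the legacy name) and PMD_SECT_XN (the ARMv6 name)
 * refer to the same hardware bit.
 */
#define PMD_TYPE_TABLE	(1 << 0)	/* L1 entry points to an L2 page table */
#define PMD_TYPE_SECT	(2 << 0)	/* L1 entry maps a 1MB section */
#define PMD_BIT4	(1 << 4)	/* pre-v6: must be set; Xscale: must be clear */
#define PMD_SECT_XN	(1 << 4)	/* ARMv6: execute never */

With mem_types[] now written against the ARMv6 meaning,
build_mem_type_table() only has to translate bit 4 for older cores, as the
third hunk below shows.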
arch/arm/mm/mmu.c

index 360405515bbdd0197809eb77e9132acc84d30a03..44f385a3eb3fbedf6141f5000e53d6c3bc972bbb 100644
@@ -181,16 +181,16 @@ static struct mem_type mem_types[] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
@@ -206,25 +206,25 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+               .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        },
        [MT_NONSHARED_DEVICE] = {
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_NONSHARED_DEV |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        }
@@ -260,20 +260,23 @@ static void __init build_mem_type_table(void)
        }
 
        /*
-        * Xscale must not have PMD bit 4 set for section mappings.
+        * ARMv5 and lower, bit 4 must be set for page tables.
+        * (was: cache "update-able on write" bit on ARM610)
+        * However, Xscale cores require this bit to be cleared.
         */
-       if (cpu_is_xscale())
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+       if (cpu_is_xscale()) {
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
-
-       /*
-        * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-        * page tables.
-        */
-       if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_l1 &= ~PMD_BIT4;
+               }
+       } else if (cpu_arch < CPU_ARCH_ARMv6) {
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
+                       if (mem_types[i].prot_sect)
+                               mem_types[i].prot_sect |= PMD_BIT4;
+               }
+       }
 
        cp = &cache_policies[cachepolicy];
        kern_pgprot = user_pgprot = cp->pte;
@@ -293,13 +296,6 @@ static void __init build_mem_type_table(void)
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
-               /*
-                * bit 4 becomes XN which we must clear for the
-                * kernel memory mapping.
-                */
-               mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-               mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
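
Taken together, the bit-4 handling after this patch reduces to the sketch
below. This is a hypothetical stand-alone helper written for illustration
only; in mmu.c the loops sit inline in build_mem_type_table(), and
struct mem_type, cpu_is_xscale() and CPU_ARCH_ARMv6 are the kernel's own
definitions.

static void __init fixup_l1_bit4(struct mem_type *types, int count,
				 int cpu_arch)
{
	int i;

	if (cpu_is_xscale()) {
		/* Xscale: bit 4 must be clear in both section and
		 * page-table L1 descriptors. */
		for (i = 0; i < count; i++) {
			types[i].prot_sect &= ~PMD_BIT4;
			types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		/* Other pre-v6 cores: bit 4 must be set, but only where
		 * a descriptor is actually defined for the type. */
		for (i = 0; i < count; i++) {
			if (types[i].prot_l1)
				types[i].prot_l1 |= PMD_BIT4;
			if (types[i].prot_sect)
				types[i].prot_sect |= PMD_BIT4;
		}
	}
	/*
	 * ARMv6 with extended page tables (CR_XP set): the table is used
	 * as-is.  Bit 4 means XN there, and MT_MEMORY/MT_ROM no longer
	 * carry it, which is why the last hunk can delete the explicit
	 * clearing of PMD_SECT_XN for the kernel mapping.
	 */
}

The net effect is that the static initialiser states the architectural
(ARMv6) meaning directly: device and cache-maintenance mappings are
execute-never by default on v6, kernel memory and ROM stay executable, and
the legacy bit-4 quirks are confined to a single fixup.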