x86: add _PAGE_IOMAP pte flag for IO mappings
author    Jeremy Fitzhardinge <jeremy@goop.org>
          Sun, 7 Sep 2008 22:21:13 +0000 (15:21 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Mon, 13 Oct 2008 08:20:56 +0000 (10:20 +0200)
Use one of the software-defined PTE bits to indicate that a mapping is
intended for an IO address.  On native hardware this is irrelevant,
since a physical address is a physical address.  But in a virtual
environment, physical addresses are also virtualized, so there needs
to be some way to distinguish between pseudo-physical addresses and
actual hardware addresses; _PAGE_IOMAP indicates this intent.

By default, __supported_pte_mask masks out _PAGE_IOMAP, so it doesn't
even appear in the final pagetable.
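
The effect can be illustrated with a small stand-alone C sketch (not
kernel code): a protection value carrying _PAGE_IOMAP is filtered
through a supported-bits mask, so on native x86 the flag never reaches
the final pagetable, while a paravirt backend could keep it.  The
helper make_pte and the two mask constants below are illustrative
only; bit 10 matches the _PAGE_BIT_IOMAP definition added in this
patch.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define _PAGE_BIT_IOMAP  10
    #define _PAGE_PRESENT    ((pteval_t)1 << 0)
    #define _PAGE_RW         ((pteval_t)1 << 1)
    #define _PAGE_IOMAP      ((pteval_t)1 << _PAGE_BIT_IOMAP)

    /* Native x86: mask out _PAGE_IOMAP, as this patch does. */
    static const pteval_t native_mask = ~(pteval_t)_PAGE_IOMAP;
    /* A paravirt backend (e.g. Xen) could leave the bit in its mask. */
    static const pteval_t pv_mask     = ~(pteval_t)0;

    static pteval_t make_pte(pteval_t phys, pteval_t prot, pteval_t supported)
    {
            /* Unsupported flag bits are silently dropped. */
            return (phys | prot) & supported;
    }

    int main(void)
    {
            pteval_t prot   = _PAGE_PRESENT | _PAGE_RW | _PAGE_IOMAP;
            pteval_t native = make_pte(0xfee00000ULL, prot, native_mask);
            pteval_t pv     = make_pte(0xfee00000ULL, prot, pv_mask);

            printf("native _PAGE_IOMAP: %d\n", (int)!!(native & _PAGE_IOMAP)); /* 0 */
            printf("pv     _PAGE_IOMAP: %d\n", (int)!!(pv     & _PAGE_IOMAP)); /* 1 */
            return 0;
    }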

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
include/asm-x86/pgtable.h

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bbe044dbe01403b0bdf67dbf4fe7e46e4e00a07e..8396868e82c5637772f8d9fe81129731f87d2e24 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -558,7 +558,7 @@ void zap_low_mappings(void)
 
 int nx_enabled;
 
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3e10054c57319c25626bd1f43bf830c11453b255..dec5c775e92b14208b3010c0744950c5b912ac0c 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -89,7 +89,7 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-unsigned long __supported_pte_mask __read_mostly = ~0UL;
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 static int do_not_nx __cpuinitdata;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8cbeda15cd29727bee40bc4f15de3a8fd3b573b2..43c3b6896cd66ef8538daae143fa847d84e4607a 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -242,16 +242,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
-               prot = PAGE_KERNEL_NOCACHE;
+               prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
-               prot = PAGE_KERNEL_UC_MINUS;
+               prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
-               prot = PAGE_KERNEL_WC;
+               prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
-               prot = PAGE_KERNEL;
+               prot = PAGE_KERNEL_IO;
                break;
        }
 
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index ed932453ef26a3538ce3fd2c7670dfc78c18d9b9..81805403b64a441875ac17dc8cf8a7c5b58921ae 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -15,7 +15,7 @@
 #define _PAGE_BIT_PAT          7       /* on 4KB pages */
 #define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
 #define _PAGE_BIT_UNUSED1      9       /* available for programmer */
-#define _PAGE_BIT_UNUSED2      10
+#define _PAGE_BIT_IOMAP                10      /* flag used to indicate IO mapping */
 #define _PAGE_BIT_UNUSED3      11
 #define _PAGE_BIT_PAT_LARGE    12      /* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL      _PAGE_BIT_UNUSED1
@@ -32,7 +32,7 @@
 #define _PAGE_PSE      (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
 #define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_UNUSED1  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
 #define _PAGE_UNUSED3  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
+#define __PAGE_KERNEL_IO               (__PAGE_KERNEL | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_NOCACHE       (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_UC_MINUS      (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_WC            (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+
 #define PAGE_KERNEL                    __pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO                 __pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC               __pgprot(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_VSYSCALL           __pgprot(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_VSYSCALL_NOCACHE   __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
 
+#define PAGE_KERNEL_IO                 __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE         __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO_UC_MINUS                __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
+#define PAGE_KERNEL_IO_WC              __pgprot(__PAGE_KERNEL_IO_WC)
+
 /*         xwr */
 #define __P000 PAGE_NONE
 #define __P001 PAGE_READONLY