x86: PAT use reserve free memtype in ioremap and iounmap
author Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Wed, 19 Mar 2008 00:00:17 +0000 (17:00 -0700)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:19 +0000 (17:41 +0200)
Use the reserve_memtype and free_memtype interfaces in ioremap/iounmap so that
the same physical range is not mapped with conflicting memory types (aliasing).

If there is an existing alias for the region, inherit the memory type from
that alias. If the existing alias conflicts with the requested type (an
uncached request over a write-back region), fail the ioremap.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
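
As a usage-level illustration of the behavior described above, here is a minimal
driver-side sketch. It is not part of the patch: EXAMPLE_MMIO_BASE and
EXAMPLE_MMIO_SIZE are hypothetical placeholders for a device's MMIO window, and
it assumes the ioremap_nocache()/iounmap() wrappers that call into __ioremap()
in this tree. The practical effect of the change is that ioremap_nocache() can
now return NULL when the requested memory type conflicts with an existing alias,
so callers must handle the NULL case.

/*
 * Illustrative only, not part of this patch. EXAMPLE_MMIO_BASE and
 * EXAMPLE_MMIO_SIZE are hypothetical placeholders.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_MMIO_BASE	0xfed00000UL
#define EXAMPLE_MMIO_SIZE	0x1000UL

static void __iomem *example_regs;

static int example_map_regs(void)
{
	/* Reserves the range as uncached in the PAT memtype tracker */
	example_regs = ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;	/* memtype conflict or no vmalloc space */
	return 0;
}

static void example_unmap_regs(void)
{
	/* iounmap() now also drops the memtype reservation taken by ioremap() */
	iounmap(example_regs);
	example_regs = NULL;
}
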
arch/x86/mm/ioremap.c

index 2ac09a5822cb230fd3a29f94f1ec684e6822864d..20c01f2b2e1130325ed29d8f38ca1c10933c4fba 100644
@@ -19,6 +19,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/pat.h>
 
 #ifdef CONFIG_X86_64
 
@@ -118,6 +119,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 {
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
+       unsigned long new_prot_val;
        pgprot_t prot;
 
        /* Don't allow wraparound or zero size */
@@ -151,6 +153,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                WARN_ON_ONCE(is_ram);
        }
 
+       /*
+        * Mappings have to be page-aligned
+        */
+       offset = phys_addr & ~PAGE_MASK;
+       phys_addr &= PAGE_MASK;
+       size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+       if (reserve_memtype(phys_addr, phys_addr + size,
+                           prot_val, &new_prot_val)) {
+               /*
+                * Do not fallback to certain memory types with certain
+                * requested type:
+                * - request is uncached, return cannot be write-back
+                */
+               if ((prot_val == _PAGE_CACHE_UC &&
+                    new_prot_val == _PAGE_CACHE_WB)) {
+                       free_memtype(phys_addr, phys_addr + size);
+                       return NULL;
+               }
+               prot_val = new_prot_val;
+       }
+
        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
@@ -161,13 +185,6 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                break;
        }
 
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
        /*
         * Ok, go for it..
         */
@@ -177,11 +194,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+               free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }
 
        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+               free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }
@@ -265,6 +284,8 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
+       free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+
        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
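
To make the alias handling concrete, here is a hedged sketch (again not part of
the patch) of two overlapping mappings of the same hypothetical MMIO range,
reusing EXAMPLE_MMIO_BASE/EXAMPLE_MMIO_SIZE from the sketch above and assuming
the ioremap_cache()/ioremap_nocache() wrappers of this tree. A request whose
type merely differs from an existing alias inherits the alias's type; an
uncached request over a range already reserved write-back is rejected.

#include <linux/kernel.h>
#include <linux/io.h>

static void example_alias_demo(void)
{
	void __iomem *wb, *uc;

	/* First mapping reserves the range as write-back (_PAGE_CACHE_WB) */
	wb = ioremap_cache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!wb)
		return;

	/*
	 * Second mapping requests uncached. The existing alias is write-back,
	 * so __ioremap() frees its memtype reservation and returns NULL
	 * instead of creating a conflicting alias.
	 */
	uc = ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!uc)
		printk(KERN_INFO "uncached request rejected, range is write-back\n");

	/* free_memtype() releases the write-back reservation again */
	iounmap(wb);
}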