[PATCH] i386/x86-64: Don't call change_page_attr with a spinlock held
author     Andi Kleen <ak@suse.de>
           Tue, 13 Dec 2005 06:17:09 +0000 (22:17 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Tue, 13 Dec 2005 06:31:16 +0000 (22:31 -0800)
Calling change_page_attr() with a spinlock held is illegal because it can sleep.

Use a two-step lookup scheme instead.  First look up the vm_struct, then
change the direct mapping, and only then unmap it.  That's OK because nobody
can reuse that particular virtual address range as long as the vm_struct is
still on the global list.

Also added some kernel-doc documentation to iounmap.
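
To make the two-step ordering concrete, here is a minimal user-space C
sketch of the same scheme, with a pthread rwlock standing in for
vmlist_lock and a printf standing in for the sleeping
change_page_attr()/global_flush_tlb() work.  Every name in it is
illustrative, not a kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct area {				/* stand-in for struct vm_struct */
	struct area *next;
	void *addr;
};

static struct area *list;		/* stand-in for vmlist */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

static void blocking_work(struct area *a)
{
	/* stands in for change_page_attr()/global_flush_tlb(): may
	   sleep, so it must run with no spinning lock held */
	printf("resetting direct mapping for %p\n", a->addr);
}

static struct area *remove_area(void *addr)
{
	struct area **pp, *a;

	pthread_rwlock_wrlock(&list_lock);
	for (pp = &list; (a = *pp) != NULL; pp = &a->next)
		if (a->addr == addr) {
			*pp = a->next;
			break;
		}
	pthread_rwlock_unlock(&list_lock);
	return a;
}

void unmap(void *addr)
{
	struct area *a, *o;

	/* step 1: look the area up, lock held only for the list walk */
	pthread_rwlock_rdlock(&list_lock);
	for (a = list; a; a = a->next)
		if (a->addr == addr)
			break;
	pthread_rwlock_unlock(&list_lock);
	if (!a)
		return;

	/* step 2: do the blocking work unlocked; the entry is still on
	   the list, so the address range cannot be reused meanwhile */
	blocking_work(a);

	/* step 3: only now unlink and free it */
	o = remove_area(addr);
	if (o == a)
		free(o);
}

The only subtlety is step 2: it is safe precisely because nothing else
unmaps the same address in parallel, which is the contract the new
kernel-doc comment on iounmap() spells out.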

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/mm/ioremap.c
arch/x86_64/mm/ioremap.c

diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 5d09de8d1c6b17e2048f485aa940f53589ec94e1..8498b5ac3955c54939bec5acca0754ba314238c0 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -223,9 +223,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * iounmap - Free an IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-       struct vm_struct *p;
+       struct vm_struct *p, *o;
 
        if ((void __force *)addr <= high_memory)
                return;
@@ -239,22 +245,37 @@ void iounmap(volatile void __iomem *addr)
                        addr < phys_to_virt(ISA_END_ADDRESS))
                return;
 
-       write_lock(&vmlist_lock);
-       p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
-       if (!p) { 
-               printk(KERN_WARNING "iounmap: bad address %p\n", addr);
+       addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+
+       /* Use the vm area unlocked, assuming the caller
+          ensures there isn't another iounmap for the same address
+          in parallel. Reuse of the virtual address is prevented by
+          leaving it in the global lists until we're done with it.
+          cpa takes care of the direct mappings. */
+       read_lock(&vmlist_lock);
+       for (p = vmlist; p; p = p->next) {
+               if (p->addr == addr)
+                       break;
+       }
+       read_unlock(&vmlist_lock);
+
+       if (!p) {
+               printk("iounmap: bad address %p\n", addr);
                dump_stack();
-               goto out_unlock;
+               return;
        }
 
+       /* Reset the direct mapping. Can block */
        if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 p->size >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        } 
-out_unlock:
-       write_unlock(&vmlist_lock);
+
+       /* Finally remove it */
+       o = remove_vm_area((void *)addr);
+       BUG_ON(p != o || o == NULL);
        kfree(p); 
 }
 EXPORT_SYMBOL(iounmap);
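
Before moving on to the x86-64 side, it may help to see the caller
contract the new kernel-doc comment states.  A minimal, hypothetical
driver fragment (the device address, register, and function names are
made up for illustration):

#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_PHYS_BASE	0xfe000000UL	/* made-up device address */
#define DEMO_REG_SIZE	0x1000

static void __iomem *demo_regs;

static int demo_setup(void)
{
	demo_regs = ioremap_nocache(DEMO_PHYS_BASE, DEMO_REG_SIZE);
	if (!demo_regs)
		return -ENOMEM;
	writel(1, demo_regs);	/* poke an (imaginary) enable register */
	return 0;
}

static void demo_teardown(void)
{
	/* exactly one iounmap() per mapping, never from two threads at
	   once, and, after this patch, never under a spinlock, since
	   iounmap() can now sleep in change_page_attr() */
	iounmap(demo_regs);
	demo_regs = NULL;
}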
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ecf7acb5db9b4b0a8f744e0665d089bb7a040d78..0d260e4492f7f96775dbbb641478a0a65ca838ba 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -247,9 +247,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
        return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free an IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-       struct vm_struct *p;
+       struct vm_struct *p, *o;
 
        if (addr <= high_memory) 
                return; 
@@ -257,12 +263,31 @@ void iounmap(volatile void __iomem *addr)
                addr < phys_to_virt(ISA_END_ADDRESS))
                return;
 
-       write_lock(&vmlist_lock);
-       p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
-       if (!p)
+       addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+       /* Use the vm area unlocked, assuming the caller
+          ensures there isn't another iounmap for the same address
+          in parallel. Reuse of the virtual address is prevented by
+          leaving it in the global lists until we're done with it.
+          cpa takes care of the direct mappings. */
+       read_lock(&vmlist_lock);
+       for (p = vmlist; p; p = p->next) {
+               if (p->addr == addr)
+                       break;
+       }
+       read_unlock(&vmlist_lock);
+
+       if (!p) {
                printk("iounmap: bad address %p\n", addr);
-       else if (p->flags >> 20)
+               dump_stack();
+               return;
+       }
+
+       /* Reset the direct mapping. Can block */
+       if (p->flags >> 20)
                ioremap_change_attr(p->phys_addr, p->size, 0);
-       write_unlock(&vmlist_lock);
+
+       /* Finally remove it */
+       o = remove_vm_area((void *)addr);
+       BUG_ON(p != o || o == NULL);
        kfree(p); 
 }
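
Finally, for completeness: the bug class this patch removes is sleeping
under a spinning lock.  On a CONFIG_PREEMPT kernel with
CONFIG_DEBUG_SPINLOCK_SLEEP enabled, the might_sleep() check flags it at
runtime.  A hypothetical reproducer module for the pattern, with
msleep() standing in for the sleeping change_page_attr() call:

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

static DEFINE_RWLOCK(demo_lock);	/* spinning rwlock, like vmlist_lock */

static int __init demo_init(void)
{
	write_lock(&demo_lock);
	/* sleeping here is illegal; with the debug option set this logs
	   "sleeping function called from invalid context" */
	msleep(1);
	write_unlock(&demo_lock);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");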