powerpc: Add a local_flush_tlb_page to handle kmap_atomic invalidates
author	Kumar Gala <galak@kernel.crashing.org>
	Wed, 19 Nov 2008 05:53:24 +0000 (05:53 +0000)
committer	Paul Mackerras <paulus@samba.org>
	Wed, 3 Dec 2008 09:46:35 +0000 (20:46 +1100)
The tlb invalidates in kmap_atomic/kunmap_atomic can be called from
IRQ context, but they are only local invalidates (on the processor
that the kmap was called on).  In the future we want to use IPIs to
do tlb invalidates, which causes an issue since flush_tlb_page() is
considered a broadcast invalidate.

Add local_flush_tlb_page() as a non-broadcast invalidate and use it in
kmap_atomic(), since we don't have enough information in the
flush_tlb_page() call to determine that it is local.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
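
Illustrative sketch only (not part of the patch): the pattern the commit
message describes is "rewrite a kernel mapping, then invalidate only the
local CPU's TLB entry", so no IPI broadcast is needed even from IRQ
context.  The helper name remap_kernel_page() below is hypothetical;
__set_pte_at() and local_flush_tlb_page() are used as in the diff that
follows.

	/*
	 * Sketch only -- not from this commit.  Rewrites a kernel mapping
	 * and invalidates just the local CPU's TLB entry, the same pattern
	 * kmap_atomic()/kunmap_atomic() use after this change.
	 * remap_kernel_page() is a hypothetical helper name.
	 */
	static inline void remap_kernel_page(unsigned long vaddr, pte_t *ptep, pte_t pte)
	{
		__set_pte_at(&init_mm, vaddr, ptep, pte);
		/* Local-only invalidate: safe from IRQ context, no broadcast/IPI. */
		local_flush_tlb_page(vaddr);
	}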
arch/powerpc/include/asm/highmem.h
arch/powerpc/include/asm/tlbflush.h

diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 91c589520c0aa65256f954c0fce003e37a527b5d..7dc52eca8b67761aa67b6714e154d08191a32ea0 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -85,7 +85,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
        BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
        __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-       flush_tlb_page(NULL, vaddr);
+       local_flush_tlb_page(vaddr);
 
        return (void*) vaddr;
 }
@@ -113,7 +113,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
         * this pte without first remap it
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       flush_tlb_page(NULL, vaddr);
+       local_flush_tlb_page(vaddr);
 #endif
        pagefault_enable();
 }
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index a2c6bfd85fb7a38ab6d002373c24afbf8648399a..93716a9f4e162783ba5700c37f87a641da07fbfd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,6 +6,7 @@
  *
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - local_flush_tlb_page(vmaddr) flushes one page on the local processor
  *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -44,6 +45,11 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
        _tlbil_pid(mm->context.id);
 }
 
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+       _tlbil_va(vmaddr, 0);
+}
+
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
 {
@@ -81,6 +87,10 @@ extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+       flush_tlb_page(NULL, vmaddr);
+}
 
 #else
 /*
@@ -138,6 +148,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+}
+
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
 {