From: Tony Lindgren
Date: Mon, 15 Aug 2005 09:51:21 +0000 (-0700)
Subject: ARM: OMAP: Update cache flushing to compile on VIVT
X-Git-Tag: v2.6.13-omap1~25
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=e4051d961a82dc9397e8adfc1b250dca810f6b89;p=linux-2.6-omap-h63xx.git

ARM: OMAP: Update cache flushing to compile on VIVT

Update cache flushing to compile on VIVT
---

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b82cd171a5a..82895072729 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -122,7 +122,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (current->active_mm == mm)
+		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 			__cpuc_flush_user_all();
 		return;
 	}
@@ -140,7 +140,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (current->active_mm == vma->vm_mm)
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
 			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 						vma->vm_flags);
 		return;
@@ -159,7 +159,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (current->active_mm == vma->vm_mm) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 			unsigned long addr = user_addr & PAGE_MASK;
 			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 		}
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index e81baff4f54..ca51ed8750e 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -256,34 +256,9 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()

-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-		__cpuc_flush_user_all();
-}
-
-static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
-		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-					vma->vm_flags);
-}
-
-static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
-{
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
-		unsigned long addr = user_addr & PAGE_MASK;
-		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-	}
-}
-#else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-#endif

 /*
  * flush_cache_user_range is used when we want to ensure that the
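
For reference, all three VIVT fast paths converge on the same check after this change. The sketch below is only an illustration assembled from the hunks above; the #include lines and the elided VIPT branch are assumptions for readability, not part of the patch.

/*
 * Minimal sketch of the pattern used in arch/arm/mm/flush.c after this
 * patch: on a VIVT cache, flush only if this mm has actually been active
 * on the current CPU (recorded in mm->cpu_vm_mask), rather than comparing
 * against current->active_mm.
 */
#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		/* mm->cpu_vm_mask tracks the CPUs this mm has run on */
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}
	/* ... VIPT handling as in arch/arm/mm/flush.c ... */
}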