From: Paul Mundt
Date: Thu, 28 Dec 2006 01:31:48 +0000 (+0900)
Subject: sh: Lazy dcache writeback optimizations.
X-Git-Tag: v2.6.21-rc1~179^2~23
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=26b7a78c55fbc0e23a7dc19e89fd50f200efc002;p=linux-2.6-omap-h63xx.git

sh: Lazy dcache writeback optimizations.

This converts the lazy dcache handling to the model described in
Documentation/cachetlb.txt and drops the ptep_get_and_clear() hacks
used for the aliasing dcaches on SH-4 and SH7705 in 32kB mode.
As a bonus, this slightly cuts down on the cache flushing frequency.

With that and the PTEA handling out of the way, the update_mmu_cache()
implementations can be consolidated, and we no longer have to worry
about which configuration the cache is in for the SH7705 case.

And finally, explicitly disable the lazy writeback on SMP (SH-4A).

Signed-off-by: Paul Mundt
---

diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 9031a22a2ce..b26e2bc5894 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -181,10 +181,6 @@ int __init detect_cpu_and_cache_system(void)
 		cpu_data->dcache.ways = 1;
 #endif
 
-#ifdef CONFIG_CPU_HAS_PTEA
-	cpu_data->flags |= CPU_HAS_PTEA;
-#endif
-
 	/*
 	 * On anything that's not a direct-mapped cache, look to the CVR
 	 * for I/D-cache specifics.
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index fddf6680ec4..28b5102e1cd 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+	select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
 
 config CPU_SH4A
 	bool
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c98..72bb4877333 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -236,10 +236,20 @@ static inline void flush_cache_4096(unsigned long start,
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f..2808b580d98 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 1999, 2000  Niibe Yutaka
  * Copyright (C) 2004  Alex Song
+ * Copyright (C) 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include
 #include
@@ -51,7 +51,6 @@ static inline void cache_wback_all(void)
 
 			if ((data & v) == v)
 				ctrl_outl(data & ~v, addr);
-
 		}
 
 		addrstart += cpu_data->dcache.way_incr;
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f93..cfc32355174 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 void copy_user_page(void *to, void *from, unsigned long address,
 		    struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cb..b052d0fee82 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
-
 #include
 #include
 #include
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		clear_page(to);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  * @from: P1 address
  * @address: U0 address to be mapped
  */
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+		    struct page *pg)
 {
 	struct page *page = virt_to_page(to);
-
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		copy_page(to, from);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
 		__flush_wback_region(to, PAGE_SIZE);
 	}
 }
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	return pte;
-}
-
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084f..9347534aa89 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,15 +2,17 @@
  * TLB flushing operations for SH with an MMU.
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2003 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include
+#include
 #include
 #include
+#include
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
@@ -132,3 +134,54 @@ void flush_tlb_all(void)
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_SH_WRITETHROUGH
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e08..16627069c53 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,69 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
+#include
 #include
-#include
-#include
-#include
 #include
-#include
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-	{
-		struct page *page = pte_page(pte);
-		unsigned long pfn = pte_pfn(pte);
-
-		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-			__flush_wback_region((void *)P1SEGADDR(phys),
-					     PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-#endif
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de..758d8dec622 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,74 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
+#include
 #include
-#include
-#include
-#include
 #include
-#include
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEA register */
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
-	pteval |= _PAGE_WT;
-#endif
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	ctrl_outl(data, addr);
 	back_to_P1();
 }
-
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 07f62ec9ff0..22f12634975 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -30,5 +30,8 @@ extern void __flush_invalidate_region(void *start, int size);
 
 #define HAVE_ARCH_UNMAPPED_AREA
 
+/* Page flag for lazy dcache write-back for the aliasing UP caches */
+#define PG_dcache_dirty PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f70d8ef76a1..6fabbba228d 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -36,8 +36,6 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-#define PG_mapped PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index b01a10f3122..f563c3bccc4 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -38,16 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped PG_arch_1
-
-#ifdef CONFIG_MMU
-extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags);
-#else /* CONFIG_MMU */
-static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-				   unsigned long size, unsigned long flags)
-{
-	return 0;
-}
-#endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index eba14184baf..3721a4412ce 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -583,11 +583,6 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
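
For reference, below is a small standalone sketch (not part of the patch) of the lazy write-back model the patch converts flush_dcache_page() and update_mmu_cache() to on UP: a page-cache page with no user mappings is merely marked PG_dcache_dirty, and the deferred write-back is performed in update_mmu_cache() just before a user mapping (and hence a potential dcache alias) is established. All names here (page_model, cache_wback_region(), model_*) are illustrative stand-ins, not kernel interfaces.

/*
 * Standalone model of the lazy dcache write-back scheme described in
 * Documentation/cachetlb.txt.  Everything below is an illustrative
 * stand-in for the real kernel code; it compiles and runs on its own.
 */
#include <stdbool.h>
#include <stdio.h>

#define PG_DCACHE_DIRTY	(1u << 0)	/* stands in for PG_arch_1 */

struct page_model {
	unsigned int flags;
	bool has_user_mappings;		/* stand-in for mapping_mapped() */
	unsigned long phys;
};

/* Stand-in for the real write-back/invalidate of the page's dcache lines. */
static void cache_wback_region(unsigned long phys)
{
	printf("write back + invalidate dcache lines for %#lx\n", phys);
}

/*
 * flush_dcache_page(): if the page only lives in the page cache (no user
 * mappings), defer the flush by marking the page dirty; otherwise an
 * alias may already exist, so write back immediately.
 */
static void model_flush_dcache_page(struct page_model *page)
{
	if (!page->has_user_mappings)
		page->flags |= PG_DCACHE_DIRTY;
	else
		cache_wback_region(page->phys);
}

/*
 * update_mmu_cache(): the page is about to gain a user mapping, so any
 * deferred write-back must happen now, before the alias is created.
 */
static void model_update_mmu_cache(struct page_model *page)
{
	if (page->flags & PG_DCACHE_DIRTY) {
		page->flags &= ~PG_DCACHE_DIRTY;
		cache_wback_region(page->phys);
	}
	page->has_user_mappings = true;
	/* ...then load PTEH/PTEL and execute ldtlb, as in the patch. */
}

int main(void)
{
	struct page_model page = { .phys = 0x0c001000 };

	model_flush_dcache_page(&page);	/* deferred: only sets the flag */
	model_update_mmu_cache(&page);	/* performs the deferred flush  */
	return 0;
}

The patch disables this deferral on SMP, so the sketch models the UP path only.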