X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fcacheflush.h;h=d38a1cadf0b7945f417b3520b9521116264cb55e;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=0e053301c09f185eec4d503c7e42101a61af9dd8;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 0e053301c..d38a1cadf 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -97,7 +97,7 @@
  * Start addresses are inclusive and end addresses are exclusive;
  * start addresses should be rounded down, end addresses up.
  *
- * See linux/Documentation/cachetlb.txt for more information.
+ * See Documentation/cachetlb.txt for more information.
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
@@ -157,6 +157,7 @@ struct cpu_cache_fns {
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
+	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_page)(void *);
 
 	void (*dma_inv_range)(unsigned long, unsigned long);
@@ -175,6 +176,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
+#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
 #define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
 
 /*
@@ -193,12 +195,14 @@
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_page(void *);
 
 /*
@@ -233,11 +237,17 @@
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do {	memcpy(dst, src, len); \
-	flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
+	do {							\
+		flush_cache_page(vma, vaddr);			\
+		memcpy(dst, src, len);				\
+		flush_dcache_page(page);			\
+	} while (0)
+
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do {							\
+		flush_cache_page(vma, vaddr);			\
+		memcpy(dst, src, len);				\
+	} while (0)
 
 /*
  * Convert calls to our calling convention.
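A note on the copy_to_user_page()/copy_from_user_page() rework in the hunk above: the old definition only copied and then flushed the I-cache, which is unsafe on a Harvard, virtually-indexed cache when the kernel writes into another task's page through a kernel alias (the ptrace breakpoint-insertion case). The new definition flushes the user alias first, copies, then pushes the D-cache out. A minimal caller sketch follows; insert_breakpoint and BKPT_INSN are illustrative names, not part of this patch:

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

#define BKPT_INSN 0xe7f001f0	/* hypothetical breakpoint encoding */

/* Plant a breakpoint in a traced task's page via its kernel mapping. */
static void insert_breakpoint(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr, void *kaddr)
{
	u32 insn = BKPT_INSN;

	/* This is the sequence copy_to_user_page() now expands to: */
	flush_cache_page(vma, vaddr);		/* write back the user-space alias */
	memcpy(kaddr + (vaddr & ~PAGE_MASK), &insn, sizeof(insn));
	flush_dcache_page(page);		/* make the store visible for I-fetch */
}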
@@ -246,14 +256,14 @@ do {	memcpy(dst, src, len); \
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-	if (current->active_mm == mm)
+	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 		__cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (current->active_mm == vma->vm_mm)
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }
 
@@ -261,12 +271,20 @@
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
 {
-	if (current->active_mm == vma->vm_mm) {
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
 }
 
+/*
+ * flush_cache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the ARM private sys_cacheflush system call.
+ */
+#define flush_cache_user_range(vma,start,end) \
+	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
 /*
  * Perform necessary cache operations to ensure that data previously
  * stored within this range of addresses can be executed by the CPU.
@@ -291,17 +309,12 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
  * about to change to user space. This is the same method as used on SPARC64.
  * See update_mmu_cache for the user space part.
  */
-extern void __flush_dcache_page(struct page *);
+extern void flush_dcache_page(struct page *);
 
-static inline void flush_dcache_page(struct page *page)
-{
-	struct address_space *mapping = page_mapping(page);
-
-	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
-	else
-		__flush_dcache_page(page);
-}
+#define flush_dcache_mmap_lock(mapping) \
+	spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+	spin_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_user_range(vma,page,addr,len) \
 	flush_dcache_page(page)
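The hunk above turns flush_dcache_page() into an out-of-line function and adds the flush_dcache_mmap_lock() helpers; the lazy-flush policy itself is unchanged and simply moves into arch code. For reference, this is the decision the removed inline encoded: defer the D-cache flush for pages that have no user mappings yet by marking them PG_dcache_dirty, and settle the debt later in update_mmu_cache(). A sketch mirroring the removed lines (the name __flush_dcache_page_lazy is illustrative):

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static inline void __flush_dcache_page_lazy(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		/* No user mappings yet: defer until update_mmu_cache(). */
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(page);	/* flush the kernel alias now */
}

The final hunk, below, adds the runtime cache-model predicates that such flush paths key off.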
@@ -312,4 +325,63 @@ static inline void flush_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
+#define __cacheid_present(val)		(val != read_cpuid(CPUID_ID))
+#define __cacheid_vivt(val)		((val & (15 << 25)) != (14 << 25))
+#define __cacheid_vipt(val)		((val & (15 << 25)) == (14 << 25))
+#define __cacheid_vipt_nonaliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
+#define __cacheid_vipt_aliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
+
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+
+#define cache_is_vivt()			1
+#define cache_is_vipt()			0
+#define cache_is_vipt_nonaliasing()	0
+#define cache_is_vipt_aliasing()	0
+
+#elif defined(CONFIG_CPU_CACHE_VIPT)
+
+#define cache_is_vivt()			0
+#define cache_is_vipt()			1
+#define cache_is_vipt_nonaliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		__cacheid_vipt_nonaliasing(__val);		\
+	})
+
+#define cache_is_vipt_aliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		__cacheid_vipt_aliasing(__val);			\
+	})
+
+#else
+
+#define cache_is_vivt()						\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
+	})
+
+#define cache_is_vipt()						\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		__cacheid_present(__val) && __cacheid_vipt(__val); \
+	})
+
+#define cache_is_vipt_nonaliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		__cacheid_present(__val) &&			\
+		 __cacheid_vipt_nonaliasing(__val);		\
+	})
+
+#define cache_is_vipt_aliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
+		__cacheid_present(__val) &&			\
+		 __cacheid_vipt_aliasing(__val);		\
+	})
+
+#endif
+
 #endif
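The cache_is_*() predicates added in the last hunk read the CP15 cache type register at most once per call: bits [28:25] carry the cache class, where the value 14 denotes the VIPT format found on ARMv6 parts, and bit 23, when set, marks a D-cache that can alias (this is exactly what the __cacheid_* masks encode). A usage sketch, assuming only this header; report_cache_model is an illustrative helper, not part of the patch:

#include <asm/cacheflush.h>

/* Map the runtime cache model to a printable name. */
static const char *report_cache_model(void)
{
	if (cache_is_vivt())
		return "VIVT";
	if (cache_is_vipt_aliasing())
		return "VIPT aliasing";
	if (cache_is_vipt_nonaliasing())
		return "VIPT non-aliasing";
	return "unknown cache model";
}

Note that when Kconfig pins the cache model, each predicate collapses to the constant 0 or 1, so branches like these cost nothing on single-model kernels; only the mixed (#else) build pays for the CP15 read.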