X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fcacheflush.h;h=746be56b1b705e9a88819868e2fe01f44527c8fc;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=048ed44dbce72fb8a87647dd1beca693cc21c2f8;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 048ed44db..746be56b1 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -14,8 +14,10 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 
-#include <asm/mman.h>
 #include <asm/glue.h>
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
 /*
  *	Cache Model
@@ -69,6 +71,14 @@
 # endif
 #endif
 
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xsc3
+# endif
+#endif
+
 #if defined(CONFIG_CPU_V6)
 //# ifdef _CACHE
 # define MULTI_CACHE 1
@@ -97,7 +107,7 @@
  * Start addresses are inclusive and end addresses are exclusive;
  * start addresses should be rounded down, end addresses up.
  *
- * See linux/Documentation/cachetlb.txt for more information.
+ * See Documentation/cachetlb.txt for more information.
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
@@ -157,6 +167,7 @@ struct cpu_cache_fns {
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
+	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_page)(void *);
 
 	void (*dma_inv_range)(unsigned long, unsigned long);
@@ -175,6 +186,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
+#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
 #define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
 
 /*
@@ -193,12 +205,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_page(void *);
 
 /*
@@ -233,39 +247,58 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do {	memcpy(dst, src, len); \
-	flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+		flush_dcache_page(page);			\
+	} while (0)
+
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+	} while (0)
 
 /*
  * Convert calls to our calling convention.
*/ #define flush_cache_all() __cpuc_flush_kern_all() - +#ifndef CONFIG_CPU_CACHE_VIPT static inline void flush_cache_mm(struct mm_struct *mm) { - if (current->active_mm == mm) + if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) __cpuc_flush_user_all(); } static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (current->active_mm == vma->vm_mm) + if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } static inline void -flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) +flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (current->active_mm == vma->vm_mm) { + if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } } +#else +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); +#endif + +/* + * flush_cache_user_range is used when we want to ensure that the + * Harvard caches are synchronised for the user space address range. + * This is used for the ARM private sys_cacheflush system call. + */ +#define flush_cache_user_range(vma,start,end) \ + __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) /* * Perform necessary cache operations to ensure that data previously @@ -294,9 +327,9 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) extern void flush_dcache_page(struct page *); #define flush_dcache_mmap_lock(mapping) \ - spin_lock_irq(&(mapping)->tree_lock) + write_lock_irq(&(mapping)->tree_lock) #define flush_dcache_mmap_unlock(mapping) \ - spin_unlock_irq(&(mapping)->tree_lock) + write_unlock_irq(&(mapping)->tree_lock) #define flush_icache_user_range(vma,page,addr,len) \ flush_dcache_page(page) @@ -307,4 +340,63 @@ extern void flush_dcache_page(struct page *); */ #define flush_icache_page(vma,page) do { } while (0) +#define __cacheid_present(val) (val != read_cpuid(CPUID_ID)) +#define __cacheid_vivt(val) ((val & (15 << 25)) != (14 << 25)) +#define __cacheid_vipt(val) ((val & (15 << 25)) == (14 << 25)) +#define __cacheid_vipt_nonaliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) +#define __cacheid_vipt_aliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) + +#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) + +#define cache_is_vivt() 1 +#define cache_is_vipt() 0 +#define cache_is_vipt_nonaliasing() 0 +#define cache_is_vipt_aliasing() 0 + +#elif defined(CONFIG_CPU_CACHE_VIPT) + +#define cache_is_vivt() 0 +#define cache_is_vipt() 1 +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_vipt_aliasing(__val); \ + }) + +#else + +#define cache_is_vivt() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + (!__cacheid_present(__val)) || __cacheid_vivt(__val); \ + }) + +#define cache_is_vipt() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && __cacheid_vipt(__val); \ + }) + +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = 
read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && \ + __cacheid_vipt_aliasing(__val); \ + }) + +#endif + #endif
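
For readers following the first hunk: CACHE_COLOUR() picks out which alias bin ("colour") a virtual address falls into on a VIPT-aliasing cache, and SHMLBA-aligned shared mappings keep every user mapping of a page on the same colour. The standalone sketch below is not kernel code; it copies the CACHE_COLOUR() definition from the hunk above and assumes the usual ARM values of 4 KB pages and SHMLBA = 4 * PAGE_SIZE (four colours) purely to illustrate the arithmetic.

/*
 * Illustration only: a plain userspace program, not part of the patch.
 * PAGE_SHIFT and SHMLBA below are assumed example values, not taken
 * from any particular kernel configuration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)

/* Definition copied verbatim from the hunk above. */
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long a = 0x40001000UL;		/* hypothetical user addresses */
	unsigned long b = 0x40002000UL;
	unsigned long c = 0x40005000UL;		/* a + SHMLBA */

	printf("colour(a)=%lu colour(b)=%lu colour(c)=%lu\n",
	       CACHE_COLOUR(a), CACHE_COLOUR(b), CACHE_COLOUR(c));

	/* a and c differ by a multiple of SHMLBA, so they share a colour
	 * and could safely be two mappings of the same physical page. */
	printf("a and c share a colour: %s\n",
	       CACHE_COLOUR(a) == CACHE_COLOUR(c) ? "yes" : "no");
	return 0;
}

The idea is that, with shared mappings forced to SHMLBA alignment, a VIPT-aliasing D-cache does not end up holding live aliases of the same shared page under different colours; the cache_is_vivt()/cache_is_vipt_*() helpers added above let the mm code determine the cache model at run time when the kernel is built for more than one cache type.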