X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fcacheflush.h;h=5f531ea03059d003bab3e86643ed2580a36133cf;hb=refs%2Fheads%2Fvserver;hp=d38a1cadf0b7945f417b3520b9521116264cb55e;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index d38a1cadf..5f531ea03 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -10,12 +10,13 @@
 #ifndef _ASMARM_CACHEFLUSH_H
 #define _ASMARM_CACHEFLUSH_H
 
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 
-#include <asm/mman.h>
 #include <asm/glue.h>
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
 /*
  * Cache Model
@@ -24,7 +25,7 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+#if defined(CONFIG_CPU_CACHE_V3)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
@@ -32,7 +33,7 @@
 # endif
 #endif
 
-#if defined(CONFIG_CPU_ARM720T)
+#if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
@@ -53,7 +54,23 @@
 # endif
 #endif
 
-#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
@@ -69,6 +86,14 @@
 # endif
 #endif
 
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xsc3
+# endif
+#endif
+
 #if defined(CONFIG_CPU_V6)
 //# ifdef _CACHE
 # define MULTI_CACHE 1
@@ -237,23 +262,21 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		flush_cache_page(vma, vaddr);			\
-		memcpy(dst, src, len);				\
-		flush_dcache_page(page);			\
+	do {							\
+		memcpy(dst, src, len);				\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
 	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		flush_cache_page(vma, vaddr);			\
-		memcpy(dst, src, len);				\
 	} while (0)
 
 /*
  * Convert calls to our calling convention.
 */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-
+#ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
@@ -269,7 +292,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -277,6 +300,27 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
 	}
 }
 
+static inline void
+flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		unsigned long addr = (unsigned long)kaddr;
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
+#else
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 /*
  * flush_cache_user_range is used when we want to ensure that the
  * Harvard caches are synchronised for the user space address range.
@@ -311,10 +355,22 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
  */
 extern void flush_dcache_page(struct page *);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	extern void __flush_anon_page(struct vm_area_struct *vma,
+				struct page *, unsigned long);
+	if (PageAnon(page))
+		__flush_anon_page(vma, page, vmaddr);
+}
+
 #define flush_dcache_mmap_lock(mapping) \
-	spin_lock_irq(&(mapping)->tree_lock)
+	write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	spin_unlock_irq(&(mapping)->tree_lock)
+	write_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_user_range(vma,page,addr,len) \
 	flush_dcache_page(page)
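
The CACHE_COLOUR() macro added at the top of this diff underpins the new VIPT handling: it maps a virtual address to its cache "colour", i.e. which page-sized slot the address occupies within an SHMLBA-sized aliasing window. A minimal user-space sketch (not part of the diff) of what it computes; the SHMLBA and PAGE_SHIFT values below are assumptions standing in for the real definitions from asm/shmparam.h and asm/page.h.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)		/* assumed: 16K aliasing window */

/* same expression as the macro added in this diff */
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long a = 0x00001000UL;	/* colour 1 */
	unsigned long b = 0x00005000UL;	/* also colour 1: indexes the same lines as a */
	unsigned long c = 0x00002000UL;	/* colour 2: would alias with a */

	printf("colour(a)=%lu colour(b)=%lu colour(c)=%lu\n",
	       CACHE_COLOUR(a), CACHE_COLOUR(b), CACHE_COLOUR(c));
	return 0;
}

Two virtual mappings of one physical page only hit the same cache lines on a VIPT cache when their colours match, which is why shared mappings are spaced SHMLBA apart.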
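The copy_to_user_page() rework in this diff inverts the old order of operations: rather than flushing the user-space alias around the memcpy(), it writes through the kernel mapping first and then makes the caches coherent over that kernel range, via flush_ptrace_access(), whose non-VIPT version above calls __cpuc_coherent_kern_range(). A sketch of that ordering, using hypothetical stand-in names (sync_kern_range, poke_text) rather than the real kernel primitives:

#include <string.h>

/* stand-in for __cpuc_coherent_kern_range(): clean the D-cache and
 * invalidate the I-cache over a kernel virtual address range */
static void sync_kern_range(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}

/* mirrors the new copy_to_user_page(): modify the page through its
 * kernel alias first, then synchronise the caches so the traced task
 * fetches the new instructions */
static void poke_text(void *kaddr, const void *insns, size_t len)
{
	memcpy(kaddr, insns, len);
	sync_kern_range((unsigned long)kaddr, (unsigned long)kaddr + len);
}

Moving the write-side flush into flush_ptrace_access() lets VIPT-cache kernels supply an out-of-line version instead, as declared in the #else branch of the CONFIG_CPU_CACHE_VIPT block.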