vserver 2.0 rc7
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 84d8752..09ffeed 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -157,6 +157,7 @@ struct cpu_cache_fns {
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
        void (*coherent_kern_range)(unsigned long, unsigned long);
+       void (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_page)(void *);
 
        void (*dma_inv_range)(unsigned long, unsigned long);
@@ -175,6 +176,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_all          cpu_cache.flush_user_all
 #define __cpuc_flush_user_range                cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range     cpu_cache.coherent_kern_range
+#define __cpuc_coherent_user_range     cpu_cache.coherent_user_range
 #define __cpuc_flush_dcache_page       cpu_cache.flush_kern_dcache_page
 
 /*
@@ -193,12 +195,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_all          __glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range                __glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range     __glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range     __glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_page       __glue(_CACHE,_flush_kern_dcache_page)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_page(void *);
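 
 /*
  * Example expansion (illustrative, not part of this patch): with a
  * single cache type configured, e.g. _CACHE == v6, the __glue()
  * macros above resolve __cpuc_coherent_user_range directly to
  * v6_coherent_user_range, provided by the cache backend in
  * assembly; multi-cache kernels dispatch through cpu_cache instead.
  */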
 
 /*
@@ -233,11 +237,17 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
+       do {                                                    \
+               flush_cache_page(vma, vaddr, page_to_pfn(page));\
+               memcpy(dst, src, len);                          \
+               flush_dcache_page(page);                        \
+       } while (0)
+
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
+       do {                                                    \
+               flush_cache_page(vma, vaddr, page_to_pfn(page));\
+               memcpy(dst, src, len);                          \
+       } while (0)
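 
 /*
  * Usage sketch (illustrative): ptrace-style writes into another
  * process's text go through copy_to_user_page() so that the new
  * bytes become visible to the instruction stream; 'kaddr' is
  * assumed to be a kernel mapping of 'page' set up by the caller:
  *
  *	copy_to_user_page(vma, page, addr,
  *			  kaddr + (addr & ~PAGE_MASK), buf, len);
  */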
 
 /*
  * Convert calls to our calling convention.
@@ -246,27 +256,35 @@ do { memcpy(dst, src, len); \
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-       if (current->active_mm == mm)
+       if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
                __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-       if (current->active_mm == vma->vm_mm)
+       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-       if (current->active_mm == vma->vm_mm) {
+       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
        }
 }
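 
 /*
  * Note (illustrative): flush_cache_page() now takes the page frame
  * number as well.  The VIVT path above only needs the virtual
  * range, but the pfn lets an aliasing VIPT implementation locate
  * and flush the kernel alias of the same physical page.
  */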
 
+/*
+ * flush_cache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the ARM private sys_cacheflush system call.
+ */
+#define flush_cache_user_range(vma,start,end) \
+       __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
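 /*
  * Usage sketch (illustrative): the handler behind sys_cacheflush
  * is expected to look up the VMA, clamp the requested range to it,
  * and then synchronise the caches, roughly:
  *
  *	vma = find_vma(current->active_mm, start);
  *	if (vma && vma->vm_start < end) {
  *		if (start < vma->vm_start)
  *			start = vma->vm_start;
  *		if (end > vma->vm_end)
  *			end = vma->vm_end;
  *		flush_cache_user_range(vma, start, end);
  *	}
  */
 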
 /*
  * Perform necessary cache operations to ensure that data previously
  * stored within this range of addresses can be executed by the CPU.
@@ -294,9 +312,9 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
 extern void flush_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
-       spin_lock_irq(&(mapping)->tree_lock)
+       write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-       spin_unlock_irq(&(mapping)->tree_lock)
+       write_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_user_range(vma,page,addr,len) \
        flush_dcache_page(page)
@@ -307,4 +325,63 @@ extern void flush_dcache_page(struct page *);
  */
 #define flush_icache_page(vma,page)    do { } while (0)
 
+#define __cacheid_present(val)         (val != read_cpuid(CPUID_ID))
+#define __cacheid_vivt(val)            ((val & (15 << 25)) != (14 << 25))
+#define __cacheid_vipt(val)            ((val & (15 << 25)) == (14 << 25))
+#define __cacheid_vipt_nonaliasing(val)        ((val & (15 << 25 | 1 << 23)) == (14 << 25))
+#define __cacheid_vipt_aliasing(val)   ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
+
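 /*
  * Decoding note (assumption: ARMv6 cache type register layout):
  * bits 28:25 hold the cache type field, where the value 14 denotes
  * the VIPT caches introduced with ARMv6, and bit 23 is the P bit
  * of the D-cache size field, set when page-colouring (aliasing)
  * restrictions apply.  Cores without a cache type register return
  * the main ID register instead, which __cacheid_present() exploits.
  */
 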
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+
+#define cache_is_vivt()                        1
+#define cache_is_vipt()                        0
+#define cache_is_vipt_nonaliasing()    0
+#define cache_is_vipt_aliasing()       0
+
+#elif defined(CONFIG_CPU_CACHE_VIPT)
+
+#define cache_is_vivt()                        0
+#define cache_is_vipt()                        1
+#define cache_is_vipt_nonaliasing()                                    \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               __cacheid_vipt_nonaliasing(__val);                      \
+       })
+
+#define cache_is_vipt_aliasing()                                       \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               __cacheid_vipt_aliasing(__val);                         \
+       })
+
+#else
+
+#define cache_is_vivt()                                                        \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               (!__cacheid_present(__val)) || __cacheid_vivt(__val);   \
+       })
+
+#define cache_is_vipt()                                                        \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               __cacheid_present(__val) && __cacheid_vipt(__val);      \
+       })
+
+#define cache_is_vipt_nonaliasing()                                    \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               __cacheid_present(__val) &&                             \
+                __cacheid_vipt_nonaliasing(__val);                     \
+       })
+
+#define cache_is_vipt_aliasing()                                       \
+       ({                                                              \
+               unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
+               __cacheid_present(__val) &&                             \
+                __cacheid_vipt_aliasing(__val);                        \
+       })
+
+#endif
+
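 /*
  * Usage sketch (illustrative): generic ARM code can now select a
  * cache maintenance strategy at run time, e.g.
  *
  *	if (cache_is_vivt() || cache_is_vipt_aliasing())
  *		flush_user_aliases(mapping, page);
  *	else
  *		flush_kernel_view(mapping, page);
  *
  * where both helpers are hypothetical names for the two strategies.
  */
 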
 #endif