#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <asm/mman.h>
#include <asm/glue.h>
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
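+
+/*
+ * Illustrative example, assuming PAGE_SHIFT == 12 and
+ * SHMLBA == 4 * PAGE_SIZE (a typical aliasing VIPT configuration):
+ * two user addresses can hit the same cache lines only when their
+ * colours match:
+ *
+ *	CACHE_COLOUR(0x40001000) == 1
+ *	CACHE_COLOUR(0x40005000) == 1	same colour => potential alias
+ *	CACHE_COLOUR(0x40002000) == 2	different colour => no alias
+ */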
/*
* Cache Model
# endif
#endif
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xsc3
+# endif
+#endif
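+
+/*
+ * A sketch of how this selection is consumed, assuming the usual
+ * <asm/glue.h> pattern: with exactly one cache type configured,
+ * _CACHE names it and the cacheflush entry points expand directly,
+ * e.g.
+ *
+ *	__cpuc_flush_kern_all -> xsc3_flush_kern_cache_all
+ *
+ * whereas MULTI_CACHE routes the same calls through the cpu_cache
+ * function pointer table at run time.
+ */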
+
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
* space" model to handle this.
*/
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		flush_cache_page(vma, vaddr);			\
-		memcpy(dst, src, len);				\
-		flush_dcache_page(page);			\
+	do {							\
+		memcpy(dst, src, len);				\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		flush_cache_page(vma, vaddr);			\
-		memcpy(dst, src, len);				\
+	do {							\
+		memcpy(dst, src, len);				\
	} while (0)
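+
+/*
+ * A minimal usage sketch, assuming the generic ptrace path: when
+ * access_process_vm() modifies another task's page through its kernel
+ * mapping (maddr), it does roughly
+ *
+ *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
+ *
+ * after which flush_ptrace_access() (write == 1) makes the store
+ * visible to the target's instruction stream at its user address.
+ */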
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
-
+#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}
static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
+static inline void
+flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *kaddr,
+ unsigned long len, int write)
+{
+ if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ unsigned long addr = (unsigned long)kaddr;
+ __cpuc_coherent_kern_range(addr, addr + len);
+ }
+}
+#else
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *kaddr,
+ unsigned long len, int write);
+#endif
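+
+/*
+ * A note on the split, assuming aliasing VIPT semantics: the kernel
+ * mapping of a page may live at a different cache colour than the
+ * user mapping, so the current-CPU inline flushes above are
+ * insufficient there; the out-of-line versions (arch/arm/mm/flush.c)
+ * handle the alias explicitly.
+ */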
+
/*
* flush_cache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
extern void flush_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
- spin_lock_irq(&(mapping)->tree_lock)
+ write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
- spin_unlock_irq(&(mapping)->tree_lock)
+ write_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)