fedora core 6 1.2949 + vserver 2.2.0
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b103e56..9df507d 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START      0xffff4000
 
-#define TOP_PTE(x)     pte_offset_kernel(top_pmd, x)
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
        unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;
 
-       set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+       set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
        flush_tlb_kernel_page(to);
 
        asm(    "mcrr   p15, 0, %1, %0, c14\n"
@@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
        if (cache_is_vipt_aliasing())
                flush_pfn_alias(pfn, user_addr);
 }
+
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+                        unsigned long uaddr, void *kaddr,
+                        unsigned long len, int write)
+{
+       if (cache_is_vivt()) {
+               if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                       unsigned long addr = (unsigned long)kaddr;
+                       __cpuc_coherent_kern_range(addr, addr + len);
+               }
+               return;
+       }
+
+       if (cache_is_vipt_aliasing()) {
+               flush_pfn_alias(page_to_pfn(page), uaddr);
+               return;
+       }
+
+       /* VIPT non-aliasing cache */
+       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+           vma->vm_flags & VM_EXEC) {
+               unsigned long addr = (unsigned long)kaddr;
+               /* only flushing the kernel mapping on non-aliasing VIPT */
+               __cpuc_coherent_kern_range(addr, addr + len);
+       }
+}
 #else
 #define flush_pfn_alias(pfn,vaddr)     do { } while (0)
 #endif
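
(Caller-side context, not part of this diff: flush_ptrace_access() is normally reached
through the copy_to_user_page()/copy_from_user_page() helpers used by ptrace-style
accessors after copying through the kernel mapping of a user page.  A rough sketch of
those helpers as they appear in include/asm-arm/cacheflush.h of this era; treat the
exact macro bodies as an assumption rather than part of this patch.)

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
	} while (0)
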
@@ -176,3 +202,42 @@ void flush_dcache_page(struct page *page)
        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data.  The expected sequence is:
+ *
+ *  get_user_pages()
+ *    -> flush_anon_page
+ *  memcpy() to/from page
+ *  if written to page, flush_dcache_page()
+ */
+void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+       unsigned long pfn;
+
+       /* VIPT non-aliasing caches need do nothing */
+       if (cache_is_vipt_nonaliasing())
+               return;
+
+       /*
+        * Write back and invalidate userspace mapping.
+        */
+       pfn = page_to_pfn(page);
+       if (cache_is_vivt()) {
+               flush_cache_page(vma, vmaddr, pfn);
+       } else {
+               /*
+                * For aliasing VIPT, we can flush an alias of the
+                * userspace address only.
+                */
+               flush_pfn_alias(pfn, vmaddr);
+       }
+
+       /*
+        * Invalidate kernel mapping.  No data should be contained
+        * in this mapping of the page.  FIXME: this is overkill
+        * since we actually ask for a write-back and invalidate.
+        */
+       __cpuc_flush_dcache_page(page_address(page));
+}
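
(Illustration, not part of this diff: the sequence documented in the comment above,
using the flush_anon_page() wrapper that checks PageAnon() and dispatches to
__flush_anon_page().  The helper name and the single-page, kernel-side copy are
hypothetical; error handling and the get_user_pages() call itself are omitted.)

/* 'page' is assumed to have been obtained via get_user_pages() on 'uaddr'. */
static void example_write_user_page(struct vm_area_struct *vma, struct page *page,
				    unsigned long uaddr, const void *src, size_t len)
{
	void *kaddr;

	flush_anon_page(vma, page, uaddr);		/* write back the userspace alias */
	kaddr = kmap(page);
	memcpy(kaddr + (uaddr & ~PAGE_MASK), src, len);	/* copy via the kernel mapping */
	kunmap(page);
	flush_dcache_page(page);			/* the page was written to */
}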