/* Cache-flushing helpers and macros (68030 cache handling). */
/* Flush masks: both instruction and data caches, or instruction cache
 * only.  NOTE(review): presumably written to the m68k cache control
 * register by the flush primitives — confirm against __flush_cache_030. */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)
/* This is needed whenever the virtual mapping of the current
process changes. */
unsigned long start,
			       unsigned long end)
{
	/* Nothing to flush unless the mapping being changed belongs to
	 * the currently-running process.  start/end are unused because
	 * __flush_cache_030() takes no arguments (whole-cache flush). */
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
- if (vma->vm_mm == current->mm)
+ if (vma->vm_mm == current->mm)
__flush_cache_030();
}
}
/* Push a page's cached data back to RAM on dcache/icache page flushes. */
#define flush_dcache_page(page)			__flush_page_to_ram(page_address(page))
/* No locking is required around dcache mmap updates here: no-ops. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)		__flush_page_to_ram(page_address(page))
/* User-range icache flush is not needed on this configuration: no-op. */
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \