X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-sh%2Fcpu-sh3%2Fcacheflush.h;h=f70d8ef76a15f6b3d980bb4e01eb36516b2bae86;hb=refs%2Fheads%2Fvserver;hp=f51aed00c68f762132c14e9173f4579c6fac6171;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c..f70d8ef76 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,11 +10,12 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H
 
-/* 
+/*
  * Cache flushing:
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
@@ -35,53 +36,35 @@
  /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#endif
 
-#define p3_cache_init()				do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-#define HAVE_ARCH_UNMAPPED_AREA
+/* SH3 has unified cache so no special action needed here */
+#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#endif
+#define p3_cache_init()				do { } while (0)
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
-