X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m68k%2Fcacheflush.h;h=16bf375fdbe1c25a36992dfe94c066b074cbe736;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=e4773946f10dccfb857f5717481c144f9ced12fe;hpb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;p=linux-2.6.git

diff --git a/include/asm-m68k/cacheflush.h b/include/asm-m68k/cacheflush.h
index e4773946f..16bf375fd 100644
--- a/include/asm-m68k/cacheflush.h
+++ b/include/asm-m68k/cacheflush.h
@@ -3,26 +3,30 @@
 
 #include <linux/mm.h>
 
+/* cache code */
+#define FLUSH_I_AND_D	(0x00000808)
+#define FLUSH_I		(0x00000008)
+
 /*
  * Cache handling functions
  */
 
-#define flush_icache()						\
-({								\
-	if (CPU_IS_040_OR_060)					\
-		__asm__ __volatile__("nop\n\t"			\
-				     ".chip 68040\n\t"		\
-				     "cinva %%ic\n\t"		\
-				     ".chip 68k" : );		\
-	else {							\
-		unsigned long _tmp;				\
-		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
-				     "orw %1,%0\n\t"		\
-				     "movec %0,%%cacr"		\
-				     : "=&d" (_tmp)		\
-				     : "id" (FLUSH_I));		\
-	}							\
-})
+static inline void flush_icache(void)
+{
+	if (CPU_IS_040_OR_060)
+		asm volatile (	"nop\n"
+			"	.chip 68040\n"
+			"	cpusha %bc\n"
+			"	.chip 68k");
+	else {
+		unsigned long tmp;
+		asm volatile (	"movec %%cacr,%0\n"
+			"	or.w %1,%0\n"
+			"	movec %0,%%cacr"
+			: "=&d" (tmp)
+			: "id" (FLUSH_I));
+	}
+}
 
 /*
  * invalidate the cache for the specified memory range.
@@ -43,10 +47,6 @@ extern void cache_push(unsigned long paddr, int len);
  */
 extern void cache_push_v(unsigned long vaddr, int len);
 
-/* cache code */
-#define FLUSH_I_AND_D	(0x00000808)
-#define FLUSH_I		(0x00000008)
-
 /* This is needed whenever the virtual mapping of the current
    process changes.  */
 #define __flush_cache_all()					\
@@ -89,6 +89,8 @@ static inline void flush_cache_mm(struct mm_struct *mm)
 		__flush_cache_030();
 }
 
+#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)
+
 /* flush_cache_range/flush_cache_page must be macros to avoid
    a dependency on linux/mm.h, which includes this file... */
 static inline void flush_cache_range(struct vm_area_struct *vma,
@@ -130,20 +132,25 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len); \
-	} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len); \
-	} while (0)
+extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+				    unsigned long addr, int len);
 
 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
 
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, void *src, int len)
+{
+	flush_cache_page(vma, vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	flush_icache_user_range(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, void *src, int len)
+{
+	flush_cache_page(vma, vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+}
+
 #endif /* _M68K_CACHEFLUSH_H */
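
Note (not part of the patch): on the 68020/68030 path, FLUSH_I (0x00000008) is the CI bit of the cache
control register (clear instruction cache), and FLUSH_I_AND_D (0x00000808) additionally sets the CD bit
(clear data cache); flush_icache() ORs that mask into %cacr with a movec read-modify-write. Below is a
minimal, illustrative plain-C sketch of that read-modify-write, assuming a hypothetical fake_cacr variable
as a stand-in for the real register, which can only be accessed with the privileged movec instruction.

/*
 * Illustrative sketch only -- a host-side C model of the '020/'030 branch
 * of flush_icache() in the patch above.  fake_cacr is a hypothetical
 * stand-in for %cacr; the real code uses movec in supervisor mode.
 */
#include <stdio.h>

#define FLUSH_I_AND_D	(0x00000808)	/* CD (bit 11) + CI (bit 3) */
#define FLUSH_I		(0x00000008)	/* CI (bit 3): clear instruction cache */

static unsigned long fake_cacr;		/* hypothetical model of %cacr */

static void model_flush_icache(void)
{
	/* models: movec %cacr,%0 ; or.w %1,%0 ; movec %0,%cacr */
	unsigned long tmp = fake_cacr;
	tmp |= FLUSH_I;
	fake_cacr = tmp;
}

int main(void)
{
	fake_cacr = 0x00000101;		/* arbitrary example value: caches enabled */
	model_flush_icache();
	printf("cacr after flush_icache: %#lx\n", fake_cacr);
	return 0;
}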