diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 5828af7d4..4ce0619f6 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -6,11 +6,11 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
- * Copyright (C) 2004 Hirokazu Takata
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata
  */
-#include <linux/config.h>
+#include <asm/assembler.h>
 
 #ifdef __KERNEL__
@@ -21,59 +21,44 @@
  * `next' and `prev' should be struct task_struct, but it isn't always defined
  */
 
-#ifndef CONFIG_SMP
-#define prepare_to_switch()	do { } while(0)
-#endif /* not CONFIG_SMP */
-
 #define switch_to(prev, next, last)	do { \
-	register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \
-	register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \
-	register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \
-	register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \
-	register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \
-	register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \
-	register struct task_struct *__last __asm__ ("r6"); \
 	__asm__ __volatile__ ( \
-		"st	r8, @-r15	\n\t" \
-		"st	r9, @-r15	\n\t" \
-		"st	r10, @-r15	\n\t" \
-		"st	r11, @-r15	\n\t" \
-		"st	r12, @-r15	\n\t" \
-		"st	r13, @-r15	\n\t" \
-		"st	r14, @-r15	\n\t" \
-		"seth	r14, #high(1f)	\n\t" \
-		"or3	r14, r14, #low(1f)	\n\t" \
-		"st	r14, @r4	; store old LR	\n\t" \
-		"st	r15, @r2	; store old SP	\n\t" \
-		"ld	r15, @r3	; load new SP	\n\t" \
-		"st	r0, @-r15	; store 'prev' onto new stack	\n\t" \
-		"ld	r14, @r5	; load new LR	\n\t" \
-		"jmp	r14	\n\t" \
-		".fillinsn	\n " \
-		"1:	\n\t" \
-		"ld	r6, @r15+	; load 'prev' from new stack	\n\t" \
-		"ld	r14, @r15+	\n\t" \
-		"ld	r13, @r15+	\n\t" \
-		"ld	r12, @r15+	\n\t" \
-		"ld	r11, @r15+	\n\t" \
-		"ld	r10, @r15+	\n\t" \
-		"ld	r9, @r15+	\n\t" \
-		"ld	r8, @r15+	\n\t" \
-		: "=&r" (__last) \
-		: "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
-		  "r" (oldlr), "r" (newlr) \
-		: "memory" \
+		"	seth	lr, #high(1f)	\n" \
+		"	or3	lr, lr, #low(1f)	\n" \
+		"	st	lr, @%4	; store old LR	\n" \
+		"	ld	lr, @%5	; load new LR	\n" \
+		"	st	sp, @%2	; store old SP	\n" \
+		"	ld	sp, @%3	; load new SP	\n" \
+		"	push	%1	; store `prev' on new stack	\n" \
+		"	jmp	lr	\n" \
+		"	.fillinsn	\n" \
+		"1:	\n" \
+		"	pop	%0	; restore `__last' from new stack	\n" \
+		: "=r" (last) \
+		: "0" (prev), \
+		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
+		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
+		: "memory", "lr" \
 	); \
-	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
-#if !defined(CONFIG_CHIP_M32102)
+#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
 	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
 #define local_irq_disable() \
 	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
-#else	/* CONFIG_CHIP_M32102 */
+#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
 static inline void local_irq_enable(void)
 {
 	unsigned long tmpreg;
@@ -95,7 +80,7 @@ static inline void local_irq_disable(void)
 		"mvtc	%0, psw	\n\t"
 		: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
 }
-#endif	/* CONFIG_CHIP_M32102 */
+#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
 
 #define local_save_flags(x) \
 	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
@@ -104,13 +89,13 @@ static inline void local_irq_disable(void)
 	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
 		: "r" (x) : "cbit", "memory")
 
-#if !defined(CONFIG_CHIP_M32102)
+#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
 #define local_irq_save(x) \
 	__asm__ __volatile__( \
 		"mvfc	%0, psw;	\n\t" \
 		"clrpsw	#0x40 -> nop;	\n\t" \
 		: "=r" (x) : /* no input */ : "memory")
-#else	/* CONFIG_CHIP_M32102 */
+#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
 #define local_irq_save(x) \
 	({ \
 		unsigned long tmpreg; \
@@ -123,7 +108,7 @@ static inline void local_irq_disable(void)
 		: "=r" (x), "=&r" (tmpreg) \
 		: : "cbit", "memory"); \
 	})
-#endif	/* CONFIG_CHIP_M32102 */
+#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
 
 #define irqs_disabled() \
 	({ \
@@ -132,8 +117,6 @@ static inline void local_irq_disable(void)
 	!(flags & 0x40); \
 })
 
-#endif	/* __KERNEL__ */
-
 #define nop()	__asm__ __volatile__ ("nop" : : )
 
 #define xchg(ptr,x) \
@@ -162,8 +145,8 @@ extern void __xchg_called_with_bad_pointer(void);
 #define DCACHE_CLEAR(reg0, reg1, addr)
 #endif /* CONFIG_CHIP_M32700_TS1 */
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
-	int size)
+static inline unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
 {
 	unsigned long flags;
 	unsigned long tmp = 0;
@@ -213,6 +196,67 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 	return (tmp);
 }
 
+#define __HAVE_ARCH_CMPXCHG	1
+
+static inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int retval;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+			DCACHE_CLEAR("%0", "r4", "%1")
+			M32R_LOCK" %0, @%1;	\n"
+		"	bne	%0, %2, 1f;	\n"
+			M32R_UNLOCK" %3, @%1;	\n"
+		"	bra	2f;	\n"
+		"	.fillinsn	\n"
+		"1:"
+			M32R_UNLOCK" %0, @%1;	\n"
+		"	.fillinsn	\n"
+		"2:"
+			: "=&r" (retval)
+			: "r" (p), "r" (old), "r" (new)
+			: "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+			, "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+		);
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#if 0	/* we don't have __cmpxchg_u64 */
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif /* 0 */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+  ({ \
+	__typeof__(*(ptr)) _o_ = (o); \
+	__typeof__(*(ptr)) _n_ = (n); \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+			(unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#endif	/* __KERNEL__ */
+
 /*
  * Memory barrier.
  *
@@ -274,7 +318,7 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
 **/
 
 #define read_barrier_depends()	do { } while (0)
@@ -284,14 +328,15 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
 #define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define arch_align_stack(x) (x)
 
 #endif	/* _ASM_M32R_SYSTEM_H */
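
Usage note (illustrative, not part of the patch above): the local_irq_save()/
local_irq_restore() pair in this header follows the usual kernel discipline of
saving the current PSW state, masking interrupts, doing the work that must not
be interrupted, and then restoring whatever state was saved rather than
unconditionally re-enabling. A minimal sketch; the function name is hypothetical:

	static void critical_section_sketch(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* save PSW state, mask interrupts */
		/* ... touch data shared with an interrupt handler ... */
		local_irq_restore(flags);	/* put back the saved PSW state */
	}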
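
Likewise illustrative: the cmpxchg() this patch introduces is normally consumed
through a compare-and-swap retry loop. A minimal sketch, assuming a kernel
context where this header is included; the helper name and the counter are
hypothetical:

	static void atomic_add_sketch(volatile unsigned int *v, unsigned int n)
	{
		unsigned int old;

		do {
			old = *v;	/* snapshot the current value */
			/* retry if another CPU changed *v in the meantime */
		} while (cmpxchg(v, old, old + n) != old);
	}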