X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fsystem.h;h=5e1289c85ed958f414ec673a535e48b1c056e221;hb=refs%2Fheads%2Fvserver;hp=9b7354b872f1d4f576e92c871fdb2055fc681f7d;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 9b7354b87..5e1289c85 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
  * Copyright (C) 1996 by Paul M. Antoine
  * Copyright (C) 1999 Silicon Graphics
  * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -12,341 +12,164 @@
 #ifndef _ASM_SYSTEM_H
 #define _ASM_SYSTEM_H
 
-#include <linux/config.h>
-#include <linux/types.h>
-
-#include <asm/addrspace.h>
-#include <asm/ptrace.h>
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#include <asm/addrspace.h>
+#include <asm/barrier.h>
+#include <asm/cpu-features.h>
+#include <asm/dsp.h>
+#include <asm/war.h>
 
-__asm__ (
-	".macro\tlocal_irq_enable\n\t"
-	".set\tpush\n\t"
-	".set\treorder\n\t"
-	".set\tnoat\n\t"
-	"mfc0\t$1,$12\n\t"
-	"ori\t$1,0x1f\n\t"
-	"xori\t$1,0x1e\n\t"
-	"mtc0\t$1,$12\n\t"
-	".set\tpop\n\t"
-	".endm");
-
-static inline void local_irq_enable(void)
-{
-	__asm__ __volatile__(
-		"local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
 /*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
  */
-__asm__ (
-	".macro\tlocal_irq_disable\n\t"
-	".set\tpush\n\t"
-	".set\tnoat\n\t"
-	"mfc0\t$1,$12\n\t"
-	"ori\t$1,1\n\t"
-	"xori\t$1,1\n\t"
-	".set\tnoreorder\n\t"
-	"mtc0\t$1,$12\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	".set\tpop\n\t"
-	".endm");
-
-static inline void local_irq_disable(void)
-{
-	__asm__ __volatile__(
-		"local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
+extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
-__asm__ (
-	".macro\tlocal_save_flags flags\n\t"
-	".set\tpush\n\t"
-	".set\treorder\n\t"
-	"mfc0\t\\flags, $12\n\t"
-	".set\tpop\n\t"
-	".endm");
-
-#define local_save_flags(x) \
-__asm__ __volatile__( \
-	"local_save_flags %0" \
-	: "=r" (x))
-
-__asm__ (
-	".macro\tlocal_irq_save result\n\t"
-	".set\tpush\n\t"
-	".set\treorder\n\t"
-	".set\tnoat\n\t"
-	"mfc0\t\\result, $12\n\t"
-	"ori\t$1, \\result, 1\n\t"
-	"xori\t$1, 1\n\t"
-	".set\tnoreorder\n\t"
-	"mtc0\t$1, $12\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	".set\tpop\n\t"
-	".endm");
-
-#define local_irq_save(x) \
-__asm__ __volatile__( \
-	"local_irq_save\t%0" \
-	: "=r" (x) \
-	: /* no inputs */ \
-	: "memory")
-
-__asm__(".macro\tlocal_irq_restore flags\n\t"
-	".set\tnoreorder\n\t"
-	".set\tnoat\n\t"
-	"mfc0\t$1, $12\n\t"
-	"andi\t\\flags, 1\n\t"
-	"ori\t$1, 1\n\t"
-	"xori\t$1, 1\n\t"
-	"or\t\\flags, $1\n\t"
-	"mtc0\t\\flags, $12\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	"sll\t$0, $0, 1\t\t\t# nop\n\t"
-	".set\tat\n\t"
-	".set\treorder\n\t"
-	".endm");
-
-#define local_irq_restore(flags) \
-do { \
-	unsigned long __tmp1; \
- \
-	__asm__ __volatile__( \
-		"local_irq_restore\t%0" \
- : "=r" (__tmp1) \ - : "0" (flags) \ - : "memory"); \ -} while(0) +struct task_struct; -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !(flags & 1); \ -}) +#ifdef CONFIG_MIPS_MT_FPAFF /* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * + * Handle the scheduler resume end of FPU affinity management. We do this + * inline to try to keep the overhead down. If we have been forced to run on + * a "CPU" with an FPU because of a previous high level of FP computation, + * but did not actually use the FPU during the most recent time-slice (CU1 + * isn't set), we undo the restriction on cpus_allowed. * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like thiswhere there are no data dependencies. + * We're not calling set_cpus_allowed() here, because we have no need to + * force prompt migration - we're already switching the current CPU to a + * different thread. 
 */
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
-	__asm__ __volatile__( \
-		".set push\n\t" \
-		".set noreorder\n\t" \
-		".set mips2\n\t" \
-		"sync\n\t" \
-		".set pop" \
-		: /* no output */ \
-		: /* no input */ \
-		: "memory")
-#else
-#define __sync() do { } while(0)
-#endif
+#define switch_to(prev,next,last) \
+do { \
+	if (cpu_has_fpu && \
+	    (prev->thread.mflags & MF_FPUBOUND) && \
+	    (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+		prev->thread.mflags &= ~MF_FPUBOUND; \
+		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+	} \
+	if (cpu_has_dsp) \
+		__save_dsp(prev); \
+	next->thread.emulated_fp = 0; \
+	(last) = resume(prev, next, next->thread_info); \
+	if (cpu_has_dsp) \
+		__restore_dsp(current); \
+} while(0)
 
-#define __fast_iob() \
-	__asm__ __volatile__( \
-		".set push\n\t" \
-		".set noreorder\n\t" \
-		"lw $0,%0\n\t" \
-		"nop\n\t" \
-		".set pop" \
-		: /* no output */ \
-		: "m" (*(int *)CKSEG1) \
-		: "memory")
-
-#define fast_wmb()	__sync()
-#define fast_rmb()	__sync()
-#define fast_mb()	__sync()
-#define fast_iob() \
-	do { \
-		__sync(); \
-		__fast_iob(); \
-	} while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		wbflush()
-#define iob()		wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		fast_mb()
-#define iob()		fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
+#define switch_to(prev,next,last) \
+do { \
+	if (cpu_has_dsp) \
+		__save_dsp(prev); \
+	(last) = resume(prev, next, task_thread_info(next)); \
+	if (cpu_has_dsp) \
+		__restore_dsp(current); \
+} while(0)
 #endif
 
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
-
 /*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
*/ -extern asmlinkage void *resume(void *last, void *next, void *next_ti); - -struct task_struct; - -#define switch_to(prev,next,last) \ -do { \ - (last) = resume(prev, next, next->thread_info); \ -} while(0) +static inline void sched_cacheflush(void) +{ +} static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) { __u32 retval; -#ifdef CONFIG_CPU_HAS_LLSC - unsigned long dummy; - - __asm__ __volatile__( - ".set\tpush\t\t\t\t# xchg_u32\n\t" - ".set\tnoreorder\n\t" - ".set\tnomacro\n\t" - "ll\t%0, %3\n" - "1:\tmove\t%2, %z4\n\t" - "sc\t%2, %1\n\t" - "beqzl\t%2, 1b\n\t" - " ll\t%0, %3\n\t" -#ifdef CONFIG_SMP - "sync\n\t" -#endif - ".set\tpop" + if (cpu_has_llsc && R10000_LLSC_WAR) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %0, %3 # xchg_u32 \n" + " .set mips0 \n" + " move %2, %z4 \n" + " .set mips3 \n" + " sc %2, %1 \n" + " beqzl %2, 1b \n" + " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) : "memory"); -#else - unsigned long flags; + } else if (cpu_has_llsc) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %0, %3 # xchg_u32 \n" + " .set mips0 \n" + " move %2, %z4 \n" + " .set mips3 \n" + " sc %2, %1 \n" + " beqz %2, 1b \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } else { + unsigned long flags; - local_irq_save(flags); - retval = *m; - *m = val; - local_irq_restore(flags); /* implies memory barrier */ -#endif + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); /* implies memory barrier */ + } + + smp_mb(); return retval; } -#ifdef CONFIG_MIPS64 +#ifdef CONFIG_64BIT static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) { __u64 retval; -#ifdef CONFIG_CPU_HAS_LLDSCD - unsigned long dummy; - - __asm__ __volatile__( - ".set\tpush\t\t\t\t# xchg_u64\n\t" - ".set\tnoreorder\n\t" - ".set\tnomacro\n\t" - "lld\t%0, %3\n" - "1:\tmove\t%2, %z4\n\t" - "scd\t%2, %1\n\t" - "beqzl\t%2, 1b\n\t" - " lld\t%0, %3\n\t" -#ifdef CONFIG_SMP - "sync\n\t" -#endif - ".set\tpop" + if (cpu_has_llsc && R10000_LLSC_WAR) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: lld %0, %3 # xchg_u64 \n" + " move %2, %z4 \n" + " scd %2, %1 \n" + " beqzl %2, 1b \n" + " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) : "memory"); -#else - unsigned long flags; + } else if (cpu_has_llsc) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: lld %0, %3 # xchg_u64 \n" + " move %2, %z4 \n" + " scd %2, %1 \n" + " beqz %2, 1b \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } else { + unsigned long flags; - local_irq_save(flags); - retval = *m; - *m = val; - local_irq_restore(flags); /* implies memory barrier */ -#endif + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); /* implies memory barrier */ + } + + smp_mb(); return retval; } @@ -362,10 +185,10 @@ extern void __xchg_called_with_bad_pointer(void); static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { - case 4: - return __xchg_u32(ptr, x); - case 8: - return __xchg_u64(ptr, x); + case 4: + return __xchg_u32(ptr, x); + case 8: + return __xchg_u64(ptr, x); } __xchg_called_with_bad_pointer(); return x; @@ -381,66 +204,102 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, { __u32 retval; -#ifdef CONFIG_CPU_HAS_LLSC - __asm__ 
__volatile__( - " .set noat \n" - "1: ll %0, %2 # __cmpxchg_u32 \n" - " bne %0, %z3, 2f \n" - " move $1, %z4 \n" - " sc $1, %1 \n" - " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif - "2: \n" - " .set at \n" - : "=&r" (retval), "=m" (*m) - : "R" (*m), "Jr" (old), "Jr" (new) - : "memory"); -#else - unsigned long flags; + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqz $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); /* implies memory barrier */ + } - local_irq_save(flags); - retval = *m; - if (retval == old) - *m = new; - local_irq_restore(flags); /* implies memory barrier */ -#endif + smp_mb(); return retval; } -#ifdef CONFIG_MIPS64 +#ifdef CONFIG_64BIT static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, unsigned long new) { __u64 retval; -#ifdef CONFIG_CPU_HAS_LLDSCD - __asm__ __volatile__( - " .set noat \n" - "1: lld %0, %2 # __cmpxchg_u64 \n" - " bne %0, %z3, 2f \n" - " move $1, %z4 \n" - " scd $1, %1 \n" - " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif - "2: \n" - " .set at \n" - : "=&r" (retval), "=m" (*m) - : "R" (*m), "Jr" (old), "Jr" (new) - : "memory"); -#else - unsigned long flags; + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " move $1, %z4 \n" + " scd $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " move $1, %z4 \n" + " scd $1, %1 \n" + " beqz $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); /* implies memory barrier */ + } - local_irq_save(flags); - retval = *m; - if (retval == old) - *m = new; - local_irq_restore(flags); /* implies memory barrier */ -#endif + smp_mb(); return retval; } @@ -469,37 +328,21 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, #define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) +extern void set_handler (unsigned long offset, void *addr, unsigned long len); +extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); +extern void *set_vi_handler (int n, void *addr); extern void *set_except_vector(int n, void *addr); +extern unsigned long ebase; extern void per_cpu_trap_init(void); -extern 
NORET_TYPE void __die(const char *, struct pt_regs *, const char *file, - const char *func, unsigned long line); -extern void __die_if_kernel(const char *, struct pt_regs *, const char *file, - const char *func, unsigned long line); - -#define die(msg, regs) \ - __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__) -#define die_if_kernel(msg, regs) \ - __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__) - -extern int serial_console; extern int stop_a_enabled; -static __inline__ int con_is_present(void) -{ - return serial_console ? 0 : 1; -} - /* - * Taken from include/asm-ia64/system.h; prevents deadlock on SMP + * See include/asm-ia64/system.h; prevents deadlock on SMP * systems. */ -#define prepare_arch_switch(rq, next) \ -do { \ - spin_lock(&(next)->switch_lock); \ - spin_unlock(&(rq)->lock); \ -} while (0) -#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) -#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) +#define __ARCH_WANT_UNLOCKED_CTXSW + +#define arch_align_stack(x) (x) #endif /* _ASM_SYSTEM_H */
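
Note on the atomic primitives touched by this diff: both the old and the new
code implement the same calling contract. xchg() atomically swaps in a value
and returns the previous contents; cmpxchg() stores "new" only if the location
still holds "old", and returns whatever value it actually found. The new code
additionally issues smp_mb() after the ll/sc sequence instead of a conditional
"sync" inside it, so on SMP both primitives now act as full memory barriers.
The sketch below is illustration only, not part of the patch: it is a minimal
userspace analogue of the compare-and-swap retry loop that callers build on
cmpxchg(), with GCC's __sync_val_compare_and_swap builtin standing in for the
MIPS ll/sc sequence, and a hypothetical helper name (counter_add).

	/*
	 * Illustration only -- not part of the header. Userspace analogue
	 * of the cmpxchg() retry-loop idiom; counter_add() is hypothetical.
	 */
	#include <stdio.h>

	static unsigned long counter;

	static unsigned long counter_add(unsigned long delta)
	{
		unsigned long old, new;

		do {
			old = counter;		/* read current value (like "ll")  */
			new = old + delta;	/* compute the value to store      */
			/*
			 * Atomically store "new" only if the location still
			 * holds "old". The builtin returns the value actually
			 * found, so a mismatch means another thread raced us
			 * (like a failed "sc") and we retry.
			 */
		} while (__sync_val_compare_and_swap(&counter, old, new) != old);

		return new;
	}

	int main(void)
	{
		printf("%lu\n", counter_add(5));	/* prints 5 */
		printf("%lu\n", counter_add(3));	/* prints 8 */
		return 0;
	}

A failed compare-and-swap simply recomputes and retries, exactly as the
"beqz %2, 1b" (or "beqzl" on R10000-workaround kernels) branch in the diff
re-runs the ll/sc sequence when the store-conditional fails.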