X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fsystem.h;fp=include%2Fasm-mips%2Fsystem.h;h=261f71d16a074f4dd4b3ab2fc2d42ba2c583d2e9;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=888fd8908467210cc9344e3ce3050eabd28ee2de;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 888fd8908..261f71d16 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -17,6 +17,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -70,7 +71,7 @@
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  */
 
 #define read_barrier_depends()	do { } while(0)
@@ -154,14 +155,56 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
 struct task_struct;
 
-#define switch_to(prev,next,last) \
-do { \
-	(last) = resume(prev, next, next->thread_info); \
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define switch_to(prev,next,last)					\
+do {									\
+	if (cpu_has_fpu &&						\
+	    (prev->thread.mflags & MF_FPUBOUND) &&			\
+	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
+		prev->thread.mflags &= ~MF_FPUBOUND;			\
+		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
+	}								\
+	if (cpu_has_dsp)						\
+		__save_dsp(prev);					\
+	next->thread.emulated_fp = 0;					\
+	(last) = resume(prev, next, next->thread_info);			\
+	if (cpu_has_dsp)						\
+		__restore_dsp(current);					\
 } while(0)
 
-#define ROT_IN_PIECES							\
-	"	.set	noreorder					\n"	\
-	"	.set	reorder						\n"
+#else
+#define switch_to(prev,next,last)					\
+do {									\
+	if (cpu_has_dsp)						\
+		__save_dsp(prev);					\
+	(last) = resume(prev, next, task_thread_info(next));		\
+	if (cpu_has_dsp)						\
+		__restore_dsp(current);					\
+} while(0)
+#endif
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
@@ -171,14 +214,17 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %3			# xchg_u32	\n"
+		"	.set	mips0					\n"
 		"	move	%2, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-		ROT_IN_PIECES
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -186,13 +232,17 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %3			# xchg_u32	\n"
+		"	.set	mips0					\n"
 		"	move	%2, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -208,7 +258,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 	return retval;
 }
 
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
 static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 {
 	__u64 retval;
@@ -217,14 +267,15 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %3			# xchg_u64	\n"
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-		ROT_IN_PIECES
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -232,6 +283,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %3			# xchg_u64	\n"
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
@@ -239,6 +291,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		"	beqz	%2, 1b					\n"
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -265,10 +318,10 @@ extern void __xchg_called_with_bad_pointer(void);
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
-		case 4:
-			return __xchg_u32(ptr, x);
-		case 8:
-			return __xchg_u64(ptr, x);
+	case 4:
+		return __xchg_u32(ptr, x);
+	case 8:
+		return __xchg_u64(ptr, x);
 	}
 	__xchg_called_with_bad_pointer();
 	return x;
@@ -286,35 +339,42 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
 		"	bne	%0, %z3, 2f				\n"
+		"	.set	mips0					\n"
 		"	move	$1, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		ROT_IN_PIECES
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
 		"2:							\n"
-		"	.set	at					\n"
-		: "=&r" (retval), "=m" (*m)
+		"	.set	pop					\n"
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
 		"	bne	%0, %z3, 2f				\n"
+		"	.set	mips0					\n"
 		"	move	$1, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
 		"2:							\n"
-		"	.set	at					\n"
-		: "=&r" (retval), "=m" (*m)
+		"	.set	pop					\n"
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else {
@@ -330,7 +390,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 	return retval;
 }
 
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
 static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 	unsigned long new)
 {
@@ -338,24 +398,27 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 
 	if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
 		"	bne	%0, %z3, 2f				\n"
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		ROT_IN_PIECES
 #ifdef CONFIG_SMP
 		"	sync						\n"
 #endif
 		"2:							\n"
-		"	.set	at					\n"
-		: "=&r" (retval), "=m" (*m)
+		"	.set	pop					\n"
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
 		"	bne	%0, %z3, 2f				\n"
 		"	move	$1, %z4					\n"
@@ -365,8 +428,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		"	sync						\n"
 #endif
 		"2:							\n"
-		"	.set	at					\n"
-		: "=&r" (retval), "=m" (*m)
+		"	.set	pop					\n"
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else {
@@ -406,32 +469,28 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
 
 #define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
 
+extern void set_handler (unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
+extern void *set_vi_handler (int n, void *addr);
 extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
 extern void per_cpu_trap_init(void);
 
-extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
-	const char *func, unsigned long line);
-extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
-	const char *func, unsigned long line);
+extern NORET_TYPE void die(const char *, struct pt_regs *);
 
-#define die(msg, regs)							\
-	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
-#define die_if_kernel(msg, regs)					\
-	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+	if (unlikely(!user_mode(regs)))
+		die(str, regs);
+}
 
 extern int stop_a_enabled;
 
 /*
- * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * See include/asm-ia64/system.h; prevents deadlock on SMP
  * systems.
  */
-#define prepare_arch_switch(rq, next)		\
-do {						\
-	spin_lock(&(next)->switch_lock);	\
-	spin_unlock(&(rq)->lock);		\
-} while (0)
-#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p)		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
 
 #define arch_align_stack(x) (x)
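
For readers skimming this patch, the short program below is a minimal user-space sketch, not part of the patch and not kernel code, of the retry idiom that callers of the cmpxchg()/xchg() primitives touched above rely on. It substitutes C11 atomic_compare_exchange_weak for the MIPS ll/sc and lld/scd sequences, and the add_with_cas() helper name is purely illustrative.

/*
 * Minimal user-space analogue of a cmpxchg()-based update loop.
 * C11 atomics stand in for the ll/sc sequences in asm-mips/system.h;
 * this is an illustration only, not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int counter;

/* Atomically add 'delta' to '*p' and return the new value. */
static unsigned int add_with_cas(_Atomic unsigned int *p, unsigned int delta)
{
	unsigned int old = atomic_load(p);

	/*
	 * Retry until the compare-and-swap succeeds, much as the kernel's
	 * "beqz/beqzl $1, 1b" retries a failed sc/scd.  On failure, 'old'
	 * is reloaded with the value currently in memory.
	 */
	while (!atomic_compare_exchange_weak(p, &old, old + delta))
		;

	return old + delta;
}

int main(void)
{
	printf("%u\n", add_with_cas(&counter, 5));	/* prints 5 */
	printf("%u\n", add_with_cas(&counter, 3));	/* prints 8 */
	return 0;
}

The kernel versions above do the same job with ll/sc (lld/scd for 64-bit). The .set mips3/.set mips0 pairs added by this patch switch the assembler's ISA level around those instructions and back, and the .set push/.set pop pairs save and restore the assembler state so the .set noat does not leak past each asm block.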