#define _ASM_SYSTEM_H
#include <linux/config.h>
-#include <asm/sgidefs.h>
-
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <asm/addrspace.h>
+#include <asm/cpu-features.h>
+#include <asm/dsp.h>
#include <asm/ptrace.h>
-#include <asm/hazards.h>
-
-__asm__ (
- ".macro\tlocal_irq_enable\n\t"
- ".set\tpush\n\t"
- ".set\treorder\n\t"
- ".set\tnoat\n\t"
- "mfc0\t$1,$12\n\t"
- "ori\t$1,0x1f\n\t"
- "xori\t$1,0x1e\n\t"
- "mtc0\t$1,$12\n\t"
- "irq_enable_hazard\n\t"
- ".set\tpop\n\t"
- ".endm");
-
-static inline void local_irq_enable(void)
-{
- __asm__ __volatile__(
- "local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-__asm__ (
- ".macro\tlocal_irq_disable\n\t"
- ".set\tpush\n\t"
- ".set\tnoat\n\t"
- "mfc0\t$1,$12\n\t"
- "ori\t$1,1\n\t"
- "xori\t$1,1\n\t"
- ".set\tnoreorder\n\t"
- "mtc0\t$1,$12\n\t"
- "irq_disable_hazard\n\t"
- ".set\tpop\n\t"
- ".endm");
-
-static inline void local_irq_disable(void)
-{
- __asm__ __volatile__(
- "local_irq_disable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-__asm__ (
- ".macro\tlocal_save_flags flags\n\t"
- ".set\tpush\n\t"
- ".set\treorder\n\t"
- "mfc0\t\\flags, $12\n\t"
- ".set\tpop\n\t"
- ".endm");
-
-#define local_save_flags(x) \
-__asm__ __volatile__( \
- "local_save_flags %0" \
- : "=r" (x))
-
-__asm__ (
- ".macro\tlocal_irq_save result\n\t"
- ".set\tpush\n\t"
- ".set\treorder\n\t"
- ".set\tnoat\n\t"
- "mfc0\t\\result, $12\n\t"
- "ori\t$1, \\result, 1\n\t"
- "xori\t$1, 1\n\t"
- ".set\tnoreorder\n\t"
- "mtc0\t$1, $12\n\t"
- "irq_disable_hazard\n\t"
- ".set\tpop\n\t"
- ".endm");
-
-#define local_irq_save(x) \
-__asm__ __volatile__( \
- "local_irq_save\t%0" \
- : "=r" (x) \
- : /* no inputs */ \
- : "memory")
-
-__asm__ (
- ".macro\tlocal_irq_restore flags\n\t"
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "mfc0\t$1, $12\n\t"
- "andi\t\\flags, 1\n\t"
- "ori\t$1, 1\n\t"
- "xori\t$1, 1\n\t"
- "or\t\\flags, $1\n\t"
- "mtc0\t\\flags, $12\n\t"
- "irq_disable_hazard\n\t"
- ".set\tat\n\t"
- ".set\treorder\n\t"
- ".endm");
-
-#define local_irq_restore(flags) \
-do { \
- unsigned long __tmp1; \
- \
- __asm__ __volatile__( \
- "local_irq_restore\t%0" \
- : "=r" (__tmp1) \
- : "0" (flags) \
- : "memory"); \
-} while(0)
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !(flags & 1); \
-})
+#include <asm/war.h>
+#include <asm/interrupt.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
*/
#define read_barrier_depends() do { } while(0)
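
/*
 * Editorial note: the example the comment fragment above refers to is the
 * classic one from the kernel's barrier documentation.  With "a" initially
 * zero and "b" one:
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * Nothing read from "b" feeds the read of "a", so read_barrier_depends()
 * orders neither read on CPU 1, and an Alpha may observe y == 3 with
 * x == 0; rmb() is required there instead.
 */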
struct task_struct;
-#define switch_to(prev,next,last) \
-do { \
- (last) = resume(prev, next, next->thread_info); \
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define switch_to(prev,next,last) \
+do { \
+ if (cpu_has_fpu && \
+ (prev->thread.mflags & MF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ prev->thread.mflags &= ~MF_FPUBOUND; \
+ prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ } \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ next->thread.emulated_fp = 0; \
+ (last) = resume(prev, next, task_thread_info(next)); \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
} while(0)
+#else
+#define switch_to(prev,next,last) \
+do { \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ (last) = resume(prev, next, task_thread_info(next)); \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
+} while(0)
+#endif
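
/*
 * Editorial note: resume() returns a pointer to the task that was actually
 * running before the switch, which switch_to() hands back through `last`.
 * This matters on SMP: by the time a switched-out thread runs again, the
 * `prev` value saved on its own stack may be stale, so the scheduler must
 * use `last` to finish the context switch for the right task.
 */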
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
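/*
 * Editorial note on the run-time selection used by the xchg/cmpxchg
 * routines below: the first branch pairs ll/sc with the branch-likely
 * beqzl, the workaround for an early-R10000 erratum selected by
 * R10000_LLSC_WAR from <asm/war.h>; the second is the plain ll/sc loop
 * using beqz; the last falls back to disabling interrupts on CPUs
 * without ll/sc.  On SMP kernels the trailing sync provides the full
 * barrier these primitives are expected to imply.
 */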
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
-#ifdef CONFIG_CPU_HAS_LLSC
- unsigned long dummy;
-
- __asm__ __volatile__(
- ".set\tpush\t\t\t\t# xchg_u32\n\t"
- ".set\tnoreorder\n\t"
- ".set\tnomacro\n\t"
- "ll\t%0, %3\n"
- "1:\tmove\t%2, %z4\n\t"
- "sc\t%2, %1\n\t"
- "beqzl\t%2, 1b\n\t"
- " ll\t%0, %3\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " beqzl %2, 1b \n"
#ifdef CONFIG_SMP
- "sync\n\t"
+ " sync \n"
#endif
- ".set\tpop"
+ " .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags); /* implies memory barrier */
+ } else if (cpu_has_llsc) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " beqz %2, 1b \n"
+#ifdef CONFIG_SMP
+ " sync \n"
#endif
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
return retval;
}
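
/*
 * Illustrative sketch (editorial, not part of this patch): a minimal
 * test-and-set trylock built directly on __xchg_u32().  The name
 * example_trylock is hypothetical.
 */
static inline int example_trylock(volatile int *lock)
{
	/* Atomically store 1; the lock was acquired iff the old value was 0. */
	return __xchg_u32(lock, 1) == 0;
}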
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
__u64 retval;
-#ifdef CONFIG_CPU_HAS_LLDSCD
- unsigned long dummy;
-
- __asm__ __volatile__(
- ".set\tpush\t\t\t\t# xchg_u64\n\t"
- ".set\tnoreorder\n\t"
- ".set\tnomacro\n\t"
- "lld\t%0, %3\n"
- "1:\tmove\t%2, %z4\n\t"
- "scd\t%2, %1\n\t"
- "beqzl\t%2, 1b\n\t"
- " lld\t%0, %3\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " beqzl %2, 1b \n"
#ifdef CONFIG_SMP
- "sync\n\t"
+ " sync \n"
#endif
- ".set\tpop"
+ " .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags); /* implies memory barrier */
+ } else if (cpu_has_llsc) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " beqz %2, 1b \n"
+#ifdef CONFIG_SMP
+ " sync \n"
#endif
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
return retval;
}
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
}
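	/*
	 * Editorial note: __xchg_called_with_bad_pointer() is declared but
	 * deliberately never defined, so an xchg() on an operand of
	 * unsupported size fails at link time instead of at run time.
	 */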
__xchg_called_with_bad_pointer();
return x;
}

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
__u32 retval;
-#ifdef CONFIG_CPU_HAS_LLSC
- __asm__ __volatile__(
- " .set noat \n"
- "1: ll %0, %2 # __cmpxchg_u32 \n"
- " bne %0, %z3, 2f \n"
- " move $1, %z4 \n"
- " sc $1, %1 \n"
- " beqz $1, 1b \n"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqzl $1, 1b \n"
#ifdef CONFIG_SMP
- " sync \n"
+ " sync \n"
#endif
- "2: \n"
- " .set at \n"
- : "=&r" (retval), "=m" (*m)
- : "R" (*m), "Jr" (old), "Jr" (new)
- : "memory");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags); /* implies memory barrier */
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqz $1, 1b \n"
+#ifdef CONFIG_SMP
+ " sync \n"
#endif
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
return retval;
}
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
unsigned long new)
{
__u64 retval;
-#ifdef CONFIG_CPU_HAS_LLDSCD
- __asm__ __volatile__(
- " .set noat \n"
- "1: lld %0, %2 # __cmpxchg_u64 \n"
- " bne %0, %z3, 2f \n"
- " move $1, %z4 \n"
- " scd $1, %1 \n"
- " beqz $1, 1b \n"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqzl $1, 1b \n"
#ifdef CONFIG_SMP
- " sync \n"
+ " sync \n"
#endif
- "2: \n"
- " .set at \n"
- : "=&r" (retval), "=m" (*m)
- : "R" (*m), "Jr" (old), "Jr" (new)
- : "memory");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags); /* implies memory barrier */
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqz $1, 1b \n"
+#ifdef CONFIG_SMP
+ " sync \n"
#endif
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
return retval;
}
#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
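
/*
 * Illustrative sketch (editorial, not part of this patch): the canonical
 * cmpxchg() retry loop, shown as a hypothetical atomic add-and-return on
 * a plain int.
 */
static inline int example_atomic_add_return(volatile int *p, int v)
{
	int old;

	/* Retry until no other CPU updates *p between the load and the CAS. */
	do {
		old = *p;
	} while (cmpxchg(p, old, old + v) != old);

	return old + v;
}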
+extern void set_handler (unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
+extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
extern void per_cpu_trap_init(void);
-extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
- const char *func, unsigned long line);
-extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
- const char *func, unsigned long line);
-
-#define die(msg, regs) \
- __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
-#define die_if_kernel(msg, regs) \
- __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
-
-extern int serial_console;
-extern int stop_a_enabled;
+extern NORET_TYPE void die(const char *, struct pt_regs *);
-static __inline__ int con_is_present(void)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
- return serial_console ? 0 : 1;
+ if (unlikely(!user_mode(regs)))
+ die(str, regs);
}
+extern int stop_a_enabled;
+
/*
- * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * See include/asm-ia64/system.h; prevents deadlock on SMP
* systems.
*/
-#define prepare_arch_switch(rq, next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
+
+#define arch_align_stack(x) (x)
#endif /* _ASM_SYSTEM_H */