* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
-#include <linux/config.h>
#include <linux/types.h>
+#include <linux/irqflags.h>
#include <asm/addrspace.h>
+#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
-#include <asm/ptrace.h>
#include <asm/war.h>
-#include <asm/interrupt.h>
-/*
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- ".set mips2\n\t" \
- "sync\n\t" \
- ".set pop" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-#else
-#define __sync() do { } while(0)
-#endif
-
-#define __fast_iob() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "lw $0,%0\n\t" \
- "nop\n\t" \
- ".set pop" \
- : /* no output */ \
- : "m" (*(int *)CKSEG1) \
- : "memory")
-
-#define fast_wmb() __sync()
-#define fast_rmb() __sync()
-#define fast_mb() __sync()
-#define fast_iob() \
- do { \
- __sync(); \
- __fast_iob(); \
- } while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() wbflush()
-#define iob() wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() fast_mb()
-#define iob() fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
/*
* switch_to(n) should switch tasks to task nr n, first
struct task_struct;
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define switch_to(prev,next,last) \
+do { \
+ if (cpu_has_fpu && \
+ (prev->thread.mflags & MF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ prev->thread.mflags &= ~MF_FPUBOUND; \
+ prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ } \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ next->thread.emulated_fp = 0; \
+ (last) = resume(prev, next, next->thread_info); \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
+} while(0)
+
+#else
#define switch_to(prev,next,last) \
do { \
	if (cpu_has_dsp) \
		__save_dsp(prev); \
	(last) = resume(prev, next, next->thread_info); \
	if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
+#endif
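
For illustration only (not part of this patch): a minimal user-space sketch of the FP-affinity release test that the CONFIG_MIPS_MT_FPAFF switch_to() above performs on scheduler resume. MF_FPUBOUND and ST0_CU1 are stand-in constant values here, and struct fake_task only models the task fields the macro touches; the real definitions live in the MIPS thread and CP0 status headers.

#include <stdio.h>

/* Stand-in values for illustration; the real MF_FPUBOUND and ST0_CU1
 * definitions come from the MIPS thread/CPU headers. */
#define MF_FPUBOUND	0x01
#define ST0_CU1		0x20000000u

struct fake_task {
	unsigned int mflags;		/* models prev->thread.mflags */
	unsigned int status;		/* models KSTK_STATUS(prev)   */
	unsigned long cpus_allowed;
	unsigned long user_cpus_allowed;
};

/* Same predicate as the macro: the task is bound to an FPU-capable CPU
 * (MF_FPUBOUND set) but CU1 is clear, so the FPU went unused during the
 * last time-slice and the affinity restriction can be dropped. */
static void fpu_affinity_release(struct fake_task *prev)
{
	if ((prev->mflags & MF_FPUBOUND) && !(prev->status & ST0_CU1)) {
		prev->mflags &= ~MF_FPUBOUND;
		prev->cpus_allowed = prev->user_cpus_allowed;
	}
}

int main(void)
{
	struct fake_task t = { MF_FPUBOUND, 0, 0x1, 0xff };

	fpu_affinity_release(&t);
	printf("cpus_allowed now %#lx\n", t.cpus_allowed);	/* prints 0xff */
	return 0;
}

As the comment above notes, the real macro deliberately avoids set_cpus_allowed(): the restriction is merely relaxed so later scheduling decisions may migrate the task, without forcing a migration during the context switch itself.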
/*
* On SMP systems, when the scheduler does migration-cost autodetection,
" .set mips3 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
" .set mips3 \n"
" sc %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
" .set mips3 \n"
" sc $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
" .set mips3 \n"
" sc $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
{
__u64 retval;
- if (cpu_has_llsc) {
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set push \n"
" .set noat \n"
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
-extern void *set_vi_srs_handler (int n, void *addr, int regset);
extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
extern void per_cpu_trap_init(void);
-extern NORET_TYPE void die(const char *, struct pt_regs *);
-
-static inline void die_if_kernel(const char *str, struct pt_regs *regs)
-{
- if (unlikely(!user_mode(regs)))
- die(str, regs);
-}
-
extern int stop_a_enabled;
/*