#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
+#include <linux/config.h>
#include <linux/types.h>
-#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
+#include <asm/interrupt.h>
/*
* read_barrier_depends - Flush all pending reads that subsequent reads
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
+#define set_wmb(var, value) \
+do { var = value; wmb(); } while (0)
+
/*
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
struct task_struct;
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-/*
- * Handle the scheduler resume end of FPU affinity management. We do this
- * inline to try to keep the overhead down. If we have been forced to run on
- * a "CPU" with an FPU because of a previous high level of FP computation,
- * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
- *
- * We're not calling set_cpus_allowed() here, because we have no need to
- * force prompt migration - we're already switching the current CPU to a
- * different thread.
- */
-
-#define switch_to(prev,next,last) \
-do { \
- if (cpu_has_fpu && \
- (prev->thread.mflags & MF_FPUBOUND) && \
- (!(KSTK_STATUS(prev) & ST0_CU1))) { \
- prev->thread.mflags &= ~MF_FPUBOUND; \
- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
- } \
- if (cpu_has_dsp) \
- __save_dsp(prev); \
- next->thread.emulated_fp = 0; \
- (last) = resume(prev, next, next->thread_info); \
- if (cpu_has_dsp) \
- __restore_dsp(current); \
-} while(0)
-
-#else
#define switch_to(prev,next,last) \
do { \
if (cpu_has_dsp) \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
-#endif
/*
* On SMP systems, when the scheduler does migration-cost autodetection,
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
+extern void *set_vi_srs_handler (int n, void *addr, int regset);
extern void *set_except_vector(int n, void *addr);
-extern unsigned long ebase;
extern void per_cpu_trap_init(void);
extern NORET_TYPE void die(const char *, struct pt_regs *);