#define read_cpuid(reg) \
({ \
unsigned int __val; \
- asm("mrc%? p15, 0, %0, c0, c0, " __stringify(reg) \
- : "=r" (__val)); \
+ asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
__val; \
})
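/*
 * Illustrative sketch, not part of this patch: with opcode_2 == 0 the
 * macro reads the Main ID register (MRC p15, 0, Rd, c0, c0, 0).  The
 * helper name below is made up for the example.
 */
static inline unsigned int example_read_main_id(void)
{
	return read_cpuid(0);
}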
* the compiler from one version to another so a bit of paranoia won't hurt.
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
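/*
 * Illustrative sketch, not part of this patch: __asmeq() strings are
 * concatenated into an asm template so the build fails if the compiler
 * did not allocate the expected registers for a call into assembly.
 * __hypothetical_asm_helper is a made-up symbol for the example.
 */
static inline unsigned long example_call_helper(unsigned long arg)
{
	register unsigned long r0 asm("r0") = arg;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		"bl	__hypothetical_asm_helper"
		: "=r" (r0)
		: "0" (r0)
		: "ip", "lr", "cc");

	return r0;
}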
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/linkage.h>
struct thread_info;
+struct task_struct;
/* information about the system we're running on */
extern unsigned int system_rev;
void die(const char *msg, struct pt_regs *regs, int err)
__attribute__((noreturn));
-void die_if_kernel(const char *str, struct pt_regs *regs, int err);
+struct siginfo;
+void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+ unsigned long err, unsigned long trap);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, const char *name);
-#include <asm/proc-fns.h>
-
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
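/*
 * Illustrative sketch, not part of this patch: tas() (test-and-set built
 * on xchg()) can implement a crude busy-wait lock.  The example_lock
 * symbol is made up; real code should use the generic spinlock primitives.
 */
static unsigned long example_lock;

static inline void example_acquire(void)
{
	/* xchg() returns the previous value: 0 means we took the lock */
	while (tas(&example_lock) != 0)
		continue;
}

static inline void example_release(void)
{
	mb();			/* order the critical section before the store */
	example_lock = 0;
}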
extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
extern int cpu_architecture(void);
+extern void cpu_init(void);
#define set_cr(x) \
__asm__ __volatile__( \
extern unsigned int user_debug;
#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
+#define vectors_high() (cr_alignment & CR_V)
#else
-#define vectors_base() (0)
+#define vectors_high() (0)
#endif
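/*
 * CR_V is the "high vectors" bit in the CP15 control register: when it is
 * set the exception vectors sit at 0xffff0000 rather than address 0, which
 * is what former vectors_base() callers now test via vectors_high().
 */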
+#if __LINUX_ARM_ARCH__ >= 6
+#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+ : : "r" (0) : "memory")
+#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
+#endif
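/*
 * On ARMv6 the CP15 c7, c10, 5 operation is the architected Data Memory
 * Barrier; pre-v6 CPUs only get a compiler barrier here.
 */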
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
-#ifdef CONFIG_SMP
/*
- * Define our own context switch locking. This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency. The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush during the context switch,
+ * so enable interrupts across the switch to avoid high interrupt
+ * latency.
*/
-#define prepare_arch_switch(rq,next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock_irq(&(rq)->lock); \
-} while (0)
-
-#define finish_arch_switch(rq,prev) \
- spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p) \
- ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented. This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next) local_irq_enable()
-#define finish_arch_switch(rq,prev) spin_unlock(&(rq)->lock)
-#define task_running(rq,p) ((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
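/*
 * Defining __ARCH_WANT_INTERRUPTS_ON_CTXSW asks the generic scheduler to
 * run the context switch with interrupts enabled rather than keeping them
 * masked for the whole switch.
 */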
/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'. schedule() itself
* contains the memory barrier to tell GCC not to cache `current'.
*/
-struct thread_info;
-struct task_struct;
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
#define switch_to(prev,next,last) \
do { \
- last = __switch_to(prev,prev->thread_info,next->thread_info); \
+ last = __switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
/*
* CPU interrupt mask handling.
*/
/*
* Enable FIQs
*/
-#define __stf() \
+#define local_fiq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
/*
* Disable FIQs
*/
-#define __clf() \
+#define local_fiq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
: "r" (x) \
: "memory", "cc")
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ (int)(flags & PSR_I_BIT); \
+})
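/*
 * PSR_I_BIT is the CPSR I flag, which masks IRQs when set, so a non-zero
 * result from irqs_disabled() means interrupts are currently off.
 */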
+
#ifdef CONFIG_SMP
-#error SMP not supported
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
-#define clf() __clf()
-#define stf() __stf()
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- flags & PSR_I_BIT; \
-})
+#endif /* CONFIG_SMP */
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
*
* We choose (1) since it's the "easiest" to achieve here and is not
* dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
*/
#define swp_is_buggy
#endif
#ifdef swp_is_buggy
unsigned long flags;
#endif
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int tmp;
+#endif
switch (size) {
-#ifdef swp_is_buggy
- case 1:
- local_irq_save(flags);
- ret = *(volatile unsigned char *)ptr;
- *(volatile unsigned char *)ptr = x;
- local_irq_restore(flags);
- break;
-
- case 4:
- local_irq_save(flags);
- ret = *(volatile unsigned long *)ptr;
- *(volatile unsigned long *)ptr = x;
- local_irq_restore(flags);
- break;
+#if __LINUX_ARM_ARCH__ >= 6
+ case 1:
+ asm volatile("@ __xchg1\n"
+ "1: ldrexb %0, [%3]\n"
+ " strexb %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 4:
+ asm volatile("@ __xchg4\n"
+ "1: ldrex %0, [%3]\n"
+ " strex %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+ case 1:
+ local_irq_save(flags);
+ ret = *(volatile unsigned char *)ptr;
+ *(volatile unsigned char *)ptr = x;
+ local_irq_restore(flags);
+ break;
+
+ case 4:
+ local_irq_save(flags);
+ ret = *(volatile unsigned long *)ptr;
+ *(volatile unsigned long *)ptr = x;
+ local_irq_restore(flags);
+ break;
#else
- case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
- : "=&r" (ret)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
- break;
- case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
- : "=&r" (ret)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
- break;
+ case 1:
+ asm volatile("@ __xchg1\n"
+ " swpb %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 4:
+ asm volatile("@ __xchg4\n"
+ " swp %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
#endif
- default: __bad_xchg(ptr, size), ret = 0;
+ default:
+ __bad_xchg(ptr, size), ret = 0;
+ break;
}
return ret;
}
-#endif /* CONFIG_SMP */
+extern void disable_hlt(void);
+extern void enable_hlt(void);
#endif /* __ASSEMBLY__ */
+#define arch_align_stack(x) (x)
+
#endif /* __KERNEL__ */
#endif