diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 9c26aecd9..7c9568d30 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -50,8 +50,10 @@
 #define read_cpuid(reg) \
 	({ \
 		unsigned int __val; \
-		asm("mrc%? p15, 0, %0, c0, c0, " __stringify(reg) \
-		    : "=r" (__val)); \
+		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg) \
+		    : "=r" (__val) \
+		    : \
+		    : "cc"); \
 		__val; \
 	})
@@ -61,14 +63,16 @@
  * the compiler from one version to another so a bit of paranoia won't hurt.
  * This string is meant to be concatenated with the inline asm string and
  * will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
  */
 #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

 #ifndef __ASSEMBLY__

-#include <linux/kernel.h>
+#include <linux/linkage.h>

 struct thread_info;
+struct task_struct;

 /* information about the system we're running on */
 extern unsigned int system_rev;
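
The __asmeq() macro above deserves a concrete illustration. When an inline
asm body hard-codes a register name, GCC must really have allocated that
register for the corresponding operand; __asmeq() concatenates .ifnc/.err
assembler directives into the asm string so that assembly stops if the two
names differ. A minimal sketch, assuming an ARM GCC toolchain; the
add_one() helper is hypothetical and exists only for illustration:

	#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

	static inline int add_one(int val)
	{
		/* Pin the operand to r0 to match the hard-coded asm body. */
		register int v asm("r0") = val;

		asm volatile(
			__asmeq("%0", "r0")	/* expands to .ifnc r0,r0: passes */
			"add	r0, r0, #1"
			: "=r" (v)
			: "0" (v));
		return v;
	}

If the compiler ever bound %0 to some other register, .ifnc would compare,
say, "r3" with "r0" and .err would abort the build, which is exactly the
paranoia the comment describes.
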
@@ -81,22 +85,53 @@
 struct pt_regs;

 void die(const char *msg, struct pt_regs *regs, int err)
 		__attribute__((noreturn));

-void die_if_kernel(const char *str, struct pt_regs *regs, int err);
+struct siginfo;
+void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+		unsigned long err, unsigned long trap);

 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 				       struct pt_regs *),
 		     int sig, const char *name);

-#include <asm/proc-fns.h>
-
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #define tas(ptr) (xchg((ptr),1))

 extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);

 extern int cpu_architecture(void);
+extern void cpu_init(void);
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA.  For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3()	0
+#else
+static inline int cpu_is_xsc3(void)
+{
+	extern unsigned int processor_id;
+
+	if ((processor_id & 0xffffe000) == 0x69056000)
+		return 1;
+
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define	cpu_is_xscale()	0
+#else
+#define	cpu_is_xscale()	1
+#endif

 #define set_cr(x) \
 	__asm__ __volatile__( \
@@ -124,12 +159,17 @@ extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 extern unsigned int user_debug;

 #if __LINUX_ARM_ARCH__ >= 4
-#define vectors_base()	((cr_alignment & CR_V) ? 0xffff0000 : 0)
+#define vectors_high()	(cr_alignment & CR_V)
 #else
-#define vectors_base()	(0)
+#define vectors_high()	(0)
 #endif

+#if __LINUX_ARM_ARCH__ >= 6
+#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+                                   : : "r" (0) : "memory")
+#else
 #define mb() __asm__ __volatile__ ("" : : : "memory")
+#endif
 #define rmb() mb()
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
@@ -137,22 +177,34 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

-#define prepare_to_switch()	do { } while(0)
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.
- * The `mb' is to tell GCC not to cache `current' across this call.
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
  */
-struct thread_info;
-struct task_struct;
 extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

-#define switch_to(prev,next,last) \
-	do { \
-		last = __switch_to(prev,prev->thread_info,next->thread_info); \
-		mb(); \
-	} while (0)
+#define switch_to(prev,next,last) \
+do { \
+	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+} while (0)
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}

 /*
  * CPU interrupt mask handling.
@@ -223,7 +275,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 /*
  * Enable FIQs
  */
-#define __stf() \
+#define local_fiq_enable() \
 ({ \
 	unsigned long temp; \
 	__asm__ __volatile__( \
@@ -238,7 +290,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 /*
  * Disable FIQs
  */
-#define __clf() \
+#define local_fiq_disable() \
 ({ \
 	unsigned long temp; \
 	__asm__ __volatile__( \
@@ -272,8 +324,14 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 	: "r" (x) \
 	: "memory", "cc")

+#define irqs_disabled() \
+({ \
+	unsigned long flags; \
+	local_save_flags(flags); \
+	(int)(flags & PSR_I_BIT); \
+})
+
 #ifdef CONFIG_SMP
-#error SMP not supported

 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
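
The new irqs_disabled() above is built on local_save_flags(), which reads
the CPSR with an mrs instruction; masking with PSR_I_BIT (bit 7 of the
CPSR, 0x80) then tells whether IRQs are currently masked, and the (int)
cast merely gives the macro a conventional integer type. A freestanding
sketch of the same idea, assuming an ARM target; the arch_*() names and
the spelled-out PSR_I_BIT literal are mine, not the kernel's:

	#define PSR_I_BIT	0x00000080	/* IRQs are masked when set */

	static inline unsigned long arch_local_save_flags(void)
	{
		unsigned long flags;

		/* mrs copies the current program status register */
		asm volatile("mrs %0, cpsr" : "=r" (flags) : : "memory", "cc");
		return flags;
	}

	static inline int arch_irqs_disabled(void)
	{
		return (int)(arch_local_save_flags() & PSR_I_BIT);
	}
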
@@ -287,15 +345,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while(0)

-#define clf()	__clf()
-#define stf()	__stf()
-
-#define irqs_disabled() \
-({ \
-	unsigned long flags; \
-	local_save_flags(flags); \
-	flags & PSR_I_BIT; \
-})
+#endif /* CONFIG_SMP */

 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
@@ -309,6 +359,9 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
  *
  * We choose (1) since it's the "easiest" to achieve here and is not
  * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
  */
 #define swp_is_buggy
 #endif

@@ -320,44 +373,80 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #ifdef swp_is_buggy
 	unsigned long flags;
 #endif
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int tmp;
+#endif

 	switch (size) {
-#ifdef swp_is_buggy
-	case 1:
-		local_irq_save(flags);
-		ret = *(volatile unsigned char *)ptr;
-		*(volatile unsigned char *)ptr = x;
-		local_irq_restore(flags);
-		break;
-
-	case 4:
-		local_irq_save(flags);
-		ret = *(volatile unsigned long *)ptr;
-		*(volatile unsigned long *)ptr = x;
-		local_irq_restore(flags);
-		break;
+#if __LINUX_ARM_ARCH__ >= 6
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"1:	ldrexb	%0, [%3]\n"
+		"	strexb	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"1:	ldrex	%0, [%3]\n"
+		"	strex	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+	case 1:
+		local_irq_save(flags);
+		ret = *(volatile unsigned char *)ptr;
+		*(volatile unsigned char *)ptr = x;
+		local_irq_restore(flags);
+		break;
+
+	case 4:
+		local_irq_save(flags);
+		ret = *(volatile unsigned long *)ptr;
+		*(volatile unsigned long *)ptr = x;
+		local_irq_restore(flags);
+		break;
 #else
-	case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
-					: "=&r" (ret)
-					: "r" (x), "r" (ptr)
-					: "memory", "cc");
-		break;
-	case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
-					: "=&r" (ret)
-					: "r" (x), "r" (ptr)
-					: "memory", "cc");
-		break;
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"	swpb	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"	swp	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
 #endif
-	default: __bad_xchg(ptr, size), ret = 0;
+	default:
+		__bad_xchg(ptr, size), ret = 0;
+		break;
 	}

 	return ret;
 }

-#endif /* CONFIG_SMP */
+extern void disable_hlt(void);
+extern void enable_hlt(void);

 #endif /* __ASSEMBLY__ */

+#define arch_align_stack(x) (x)
+
 #endif /* __KERNEL__ */

 #endif
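
The ARMv6 branch of __xchg() above is the classic load-exclusive/
store-exclusive retry loop: ldrex reads the old value and arms the
exclusive monitor for that address, strex stores the new value only if
the monitor is still armed and writes 0 into its status operand on
success, and the loop retries otherwise. A standalone sketch of the same
word-sized pattern; xchg_word() is a hypothetical name and an ARMv6+
toolchain is assumed:

	static inline unsigned long xchg_word(volatile unsigned long *ptr,
					      unsigned long new)
	{
		unsigned long old, tmp;

		asm volatile(
		"1:	ldrex	%0, [%3]\n"	/* old = *ptr, arm the monitor */
		"	strex	%1, %2, [%3]\n"	/* try *ptr = new, tmp = status */
		"	teq	%1, #0\n"
		"	bne	1b"		/* store failed, start over */
		: "=&r" (old), "=&r" (tmp)
		: "r" (new), "r" (ptr)
		: "memory", "cc");

		return old;
	}

The pre-v6 paths achieve the same effect with the single swp instruction,
or, on SWP-buggy StrongARM parts, by disabling interrupts around a plain
load/store pair, which is also why the swp_is_buggy path #errors out on
SMP: masking interrupts on one CPU cannot make the sequence atomic
against another CPU.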