X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fsystem.h;h=7572ac4ff5e5c3a4a9400def781dcf829b464109;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=9c26aecd963d0d9c40f406947e38f7029be3659d;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 9c26aecd9..7572ac4ff 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -50,11 +50,45 @@
 #define read_cpuid(reg)						\
 	({							\
 		unsigned int __val;				\
-		asm("mrc%? p15, 0, %0, c0, c0, " __stringify(reg)	\
-		    : "=r" (__val));				\
+		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val)				\
+		    :						\
+		    : "cc");					\
 		__val;						\
 	})
 
+#define __cacheid_present(val)		(val != read_cpuid(CPUID_ID))
+#define __cacheid_vivt(val)		((val & (15 << 25)) != (14 << 25))
+#define __cacheid_vipt(val)		((val & (15 << 25)) == (14 << 25))
+#define __cacheid_vipt_nonaliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
+#define __cacheid_vipt_aliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
+
+#define cache_is_vivt()						\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		(!__cacheid_present(__val)) || __cacheid_vivt(__val);	\
+	})
+
+#define cache_is_vipt()						\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) && __cacheid_vipt(__val);	\
+	})
+
+#define cache_is_vipt_nonaliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) &&			\
+			__cacheid_vipt_nonaliasing(__val);	\
+	})
+
+#define cache_is_vipt_aliasing()				\
+	({							\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) &&			\
+			__cacheid_vipt_aliasing(__val);		\
+	})
+
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -66,9 +100,10 @@
 
 #ifndef __ASSEMBLY__
 
-#include
+#include
 
 struct thread_info;
+struct task_struct;
 
 /* information about the system we're running on */
 extern unsigned int system_rev;
@@ -137,22 +172,46 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#define prepare_to_switch()	do { } while(0)
+#ifdef CONFIG_SMP
+/*
+ * Define our own context switch locking.  This allows us to enable
+ * interrupts over the context switch, otherwise we end up with high
+ * interrupt latency.  The real problem area is switch_mm() which may
+ * do a full cache flush.
+ */
+#define prepare_arch_switch(rq,next)				\
+do {								\
+	spin_lock(&(next)->switch_lock);			\
+	spin_unlock_irq(&(rq)->lock);				\
+} while (0)
+
+#define finish_arch_switch(rq,prev)				\
+	spin_unlock(&(prev)->switch_lock)
+
+#define task_running(rq,p)					\
+	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#else
+/*
+ * Our UP-case is more simple, but we assume knowledge of how
+ * spin_unlock_irq() and friends are implemented.  This avoids
+ * us needlessly decrementing and incrementing the preempt count.
+ */
+#define prepare_arch_switch(rq,next)	local_irq_enable()
+#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
+#define task_running(rq,p)		((rq)->curr == (p))
+#endif
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.
- * The `mb' is to tell GCC not to cache `current' across this call.
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
  */
-struct thread_info;
-struct task_struct;
 extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
 
-#define switch_to(prev,next,last)					\
-	do {								\
-		last = __switch_to(prev,prev->thread_info,next->thread_info);	\
-		mb();								\
-	} while (0)
+#define switch_to(prev,next,last)					\
+do {									\
+	last = __switch_to(prev,prev->thread_info,next->thread_info);	\
+} while (0)
 
 /*
  * CPU interrupt mask handling.
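
Note on the first hunk: the new cache_is_*() predicates classify the CPU cache purely from the value read_cpuid(CPUID_CACHETYPE) returns. The stand-alone C sketch below is not part of the patch; it just mirrors the same bit tests in user space so the decision tree is easier to follow (ctype field in bits [28:25] equal to 14 for VIPT, bit 23 set for an aliasing VIPT cache, "register reads back as the main ID" meaning no cache type register at all). The function name and the sample values in main() are made up.

/*
 * Illustrative decode of a raw cache type register value, using the
 * same bit tests as the __cacheid_*() macros in the hunk above.
 */
#include <stdio.h>

static const char *cache_kind(unsigned int cachetype, unsigned int main_id)
{
	if (cachetype == main_id)		/* !__cacheid_present() */
		return "VIVT (no cache type register)";
	if ((cachetype & (15 << 25)) != (14 << 25))
		return "VIVT";			/* __cacheid_vivt() */
	if (cachetype & (1 << 23))
		return "VIPT, aliasing";	/* __cacheid_vipt_aliasing() */
	return "VIPT, non-aliasing";		/* __cacheid_vipt_nonaliasing() */
}

int main(void)
{
	/* made-up register values, purely for illustration */
	unsigned int main_id   = 0x41000000;
	unsigned int cachetype = (14u << 25) | (1u << 23);

	printf("%s\n", cache_kind(cachetype, main_id));
	return 0;
}

The VIVT fallback when no cache type register is present mirrors the (!__cacheid_present(__val)) term in cache_is_vivt().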
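
Note on the last hunk: the CONFIG_SMP comment explains why rq->lock is dropped early in prepare_arch_switch(): switch_mm() may do a full cache flush, and holding the runqueue lock with interrupts off for that long would hurt latency. The per-task switch_lock is what keeps task_running() truthful during that window. The following user-space model is my own sketch, not kernel code: C11 atomics stand in for the kernel spinlocks, struct task and struct runqueue are cut down to the fields the macros touch, and the claim that the outgoing task's switch_lock is still held mid-switch is inferred from the prepare/finish pairing (each task's lock is taken when it is switched in and released by finish_arch_switch() after it is switched out).

/*
 * Toy model of the switch_lock scheme: a task still counts as running
 * while its switch_lock is held, even after rq->curr has moved on and
 * rq->lock has been dropped.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	atomic_bool switch_lock;	/* stand-in for the kernel spinlock: true = held */
	const char *name;
};

struct runqueue {
	struct task *curr;
};

static void lock(atomic_bool *l)
{
	while (atomic_exchange(l, true))
		;			/* spin, as spin_lock() would */
}

static void unlock(atomic_bool *l)
{
	atomic_store(l, false);
}

/* same shape as the new SMP task_running() macro */
static bool task_running(struct runqueue *rq, struct task *p)
{
	return rq->curr == p || atomic_load(&p->switch_lock);
}

int main(void)
{
	struct task prev = { false, "prev" };
	struct task next = { false, "next" };
	struct runqueue rq = { &prev };

	/* prev's switch_lock has been held since prev itself was switched
	 * in (it was "next" in that earlier prepare_arch_switch()) */
	lock(&prev.switch_lock);

	/* prepare_arch_switch(rq, next): pin next; rq->lock (not modelled
	 * here) is dropped right after, so interrupts stay enabled over
	 * the potentially slow switch_mm() cache flush */
	lock(&next.switch_lock);

	rq.curr = &next;		/* the switch itself */
	printf("mid-switch: %s still running? %d\n",
	       prev.name, task_running(&rq, &prev));	/* prints 1 */

	/* finish_arch_switch(rq, prev): prev is fully off the CPU now */
	unlock(&prev.switch_lock);
	printf("after finish: %s still running? %d\n",
	       prev.name, task_running(&rq, &prev));	/* prints 0 */

	return 0;
}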