[linux-2.6.git] include/asm-arm/system.h (vserver 1.9.3)
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

#define CPU_ARCH_UNKNOWN        0
#define CPU_ARCH_ARMv3          1
#define CPU_ARCH_ARMv4          2
#define CPU_ARCH_ARMv4T         3
#define CPU_ARCH_ARMv5          4
#define CPU_ARCH_ARMv5T         5
#define CPU_ARCH_ARMv5TE        6
#define CPU_ARCH_ARMv5TEJ       7
#define CPU_ARCH_ARMv6          8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M    (1 << 0)        /* MMU enable                           */
#define CR_A    (1 << 1)        /* Alignment abort enable               */
#define CR_C    (1 << 2)        /* Dcache enable                        */
#define CR_W    (1 << 3)        /* Write buffer enable                  */
#define CR_P    (1 << 4)        /* 32-bit exception handler             */
#define CR_D    (1 << 5)        /* 32-bit data address range            */
#define CR_L    (1 << 6)        /* Implementation defined               */
#define CR_B    (1 << 7)        /* Big endian                           */
#define CR_S    (1 << 8)        /* System MMU protection                */
#define CR_R    (1 << 9)        /* ROM MMU protection                   */
#define CR_F    (1 << 10)       /* Implementation defined               */
#define CR_Z    (1 << 11)       /* Implementation defined               */
#define CR_I    (1 << 12)       /* Icache enable                        */
#define CR_V    (1 << 13)       /* Vectors relocated to 0xffff0000      */
#define CR_RR   (1 << 14)       /* Round Robin cache replacement        */
#define CR_L4   (1 << 15)       /* LDR pc can set T bit                 */
#define CR_DT   (1 << 16)
#define CR_IT   (1 << 18)
#define CR_ST   (1 << 19)
#define CR_FI   (1 << 21)       /* Fast interrupt (lower latency mode)  */
#define CR_U    (1 << 22)       /* Unaligned access operation           */
#define CR_XP   (1 << 23)       /* Extended page tables                 */
#define CR_VE   (1 << 24)       /* Vectored interrupts                  */

#define CPUID_ID        0
#define CPUID_CACHETYPE 1
#define CPUID_TCM       2
#define CPUID_TLBTYPE   3

#define read_cpuid(reg)                                                 \
        ({                                                              \
                unsigned int __val;                                     \
                asm("mrc        p15, 0, %0, c0, c0, " __stringify(reg)  \
                    : "=r" (__val)                                      \
                    :                                                   \
                    : "cc");                                            \
                __val;                                                  \
        })

#define __cacheid_present(val)          (val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)             ((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)             ((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)    ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

#define cache_is_vivt()                                                 \
        ({                                                              \
                unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
                (!__cacheid_present(__val)) || __cacheid_vivt(__val);   \
        })

#define cache_is_vipt()                                                 \
        ({                                                              \
                unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
                __cacheid_present(__val) && __cacheid_vipt(__val);      \
        })

#define cache_is_vipt_nonaliasing()                                     \
        ({                                                              \
                unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
                __cacheid_present(__val) &&                             \
                 __cacheid_vipt_nonaliasing(__val);                     \
        })

#define cache_is_vipt_aliasing()                                        \
        ({                                                              \
                unsigned int __val = read_cpuid(CPUID_CACHETYPE);       \
                __cacheid_present(__val) &&                             \
                 __cacheid_vipt_aliasing(__val);                        \
        })
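
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * the predicates above are typically consulted in cache maintenance paths,
 * since only VIVT and aliasing VIPT caches need extra flushing when a page
 * becomes visible at a new virtual address.  flush_page_alias() below is a
 * hypothetical helper used purely for illustration.
 *
 *	if (cache_is_vivt() || cache_is_vipt_aliasing())
 *		flush_page_alias(page);
 */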

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
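
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * __asmeq() is pasted in front of an inline asm template to check, at
 * assemble time, that a variable tied to a named register really was
 * allocated to that register.  __some_asm_helper below is hypothetical;
 * only the __asmeq() pattern matters.
 *
 *	register unsigned long r0 asm("r0") = arg;
 *
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"bl	__some_asm_helper"
 *		: "+r" (r0) : : "lr", "cc", "memory");
 *
 * If the compiler picked anything other than r0 for operand %0, the
 * generated ".ifnc" test is true, ".err" fires and the build stops.
 */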

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
                __attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
                                       struct pt_regs *),
                     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
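
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * xchg() atomically stores a new value and returns the old one, which is
 * enough to build a crude test-and-set lock: my_lock is 0 when free, 1
 * when taken, and the caller owns it if the old value was 0.  The names
 * are hypothetical and a real lock would also need memory barriers; only
 * the xchg() call itself is the point here.
 *
 *	static unsigned long my_lock;
 *
 *	static int my_trylock(void)
 *	{
 *		return xchg(&my_lock, 1) == 0;
 *	}
 *
 *	static void my_unlock(void)
 *	{
 *		my_lock = 0;
 *	}
 */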
131
132 extern asmlinkage void __backtrace(void);
133
134 extern int cpu_architecture(void);
135
136 #define set_cr(x)                                       \
137         __asm__ __volatile__(                           \
138         "mcr    p15, 0, %0, c1, c0, 0   @ set CR"       \
139         : : "r" (x) : "cc")
140
141 #define get_cr()                                        \
142         ({                                              \
143         unsigned int __val;                             \
144         __asm__ __volatile__(                           \
145         "mrc    p15, 0, %0, c1, c0, 0   @ get CR"       \
146         : "=r" (__val) : : "cc");                       \
147         __val;                                          \
148         })
149
150 extern unsigned long cr_no_alignment;   /* defined in entry-armv.S */
151 extern unsigned long cr_alignment;      /* defined in entry-armv.S */
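
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * the control register is updated with a read-modify-write using the CR_*
 * bits defined at the top of this file, e.g. to enable alignment fault
 * checking:
 *
 *	unsigned int cr = get_cr();
 *
 *	set_cr(cr | CR_A);
 *
 * Ordinary drivers never touch CP15 CR1 directly; the alignment trap code
 * keeps cr_alignment/cr_no_alignment consistent with what it programs.
 */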

#define UDBG_UNDEFINED  (1 << 0)
#define UDBG_SYSCALL    (1 << 1)
#define UDBG_BADABORT   (1 << 2)
#define UDBG_SEGV       (1 << 3)
#define UDBG_BUS        (1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base()  ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()  (0)
#endif

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
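
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * on this uniprocessor port the barriers above only constrain the
 * compiler, but they are still written in the usual producer/consumer
 * shape so the same code stays correct where the barriers are real.
 * shared_data and shared_ready are hypothetical variables; the reader
 * side does the mirror image with rmb() between reading the flag and
 * reading the data.
 *
 *	shared_data = new_value;
 *	wmb();
 *	shared_ready = 1;
 */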

#ifdef CONFIG_SMP
/*
 * Define our own context switch locking.  This allows us to enable
 * interrupts over the context switch, otherwise we end up with high
 * interrupt latency.  The real problem area is switch_mm() which may
 * do a full cache flush.
 */
#define prepare_arch_switch(rq,next)                                    \
do {                                                                    \
        spin_lock(&(next)->switch_lock);                                \
        spin_unlock_irq(&(rq)->lock);                                   \
} while (0)

#define finish_arch_switch(rq,prev)                                     \
        spin_unlock(&(prev)->switch_lock)

#define task_running(rq,p)                                              \
        ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
#else
/*
 * Our UP case is simpler, but it assumes knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next)    local_irq_enable()
#define finish_arch_switch(rq,prev)     spin_unlock(&(rq)->lock)
#define task_running(rq,p)              ((rq)->curr == (p))
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)                                       \
do {                                                                    \
        last = __switch_to(prev,prev->thread_info,next->thread_info);   \
} while (0)

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)                                       \
        ({                                                      \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_save\n"     \
        "cpsid  i"                                              \
        : "=r" (x) : : "memory", "cc");                         \
        })

#define local_irq_enable()  __asm__("cpsie i    @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i    @ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f    @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f    @ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)                                       \
        ({                                                      \
                unsigned long temp;                             \
                (void) (&temp == &x);                           \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_save\n"     \
"       orr     %1, %0, #128\n"                                 \
"       msr     cpsr_c, %1"                                     \
        : "=r" (x), "=r" (temp)                                 \
        :                                                       \
        : "memory", "cc");                                      \
        })

/*
 * Enable IRQs
 */
#define local_irq_enable()                                      \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_enable\n"   \
"       bic     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory", "cc");                                      \
        })

/*
 * Disable IRQs
 */
#define local_irq_disable()                                     \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_disable\n"  \
"       orr     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory", "cc");                                      \
        })

/*
 * Enable FIQs
 */
#define __stf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ stf\n"                \
"       bic     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory", "cc");                                      \
        })

/*
 * Disable FIQs
 */
#define __clf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ clf\n"                \
"       orr     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory", "cc");                                      \
        })

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)                                     \
        ({                                                      \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_save_flags"     \
        : "=r" (x) : : "memory", "cc");                         \
        })

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)                                    \
        __asm__ __volatile__(                                   \
        "msr    cpsr_c, %0              @ local_irq_restore\n"  \
        :                                                       \
        : "r" (x)                                               \
        : "memory", "cc")
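
/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * the save/restore pair above is the canonical way to protect a short
 * critical section against local interrupt handlers.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);
 *
 * Restoring the saved CPSR value (rather than unconditionally enabling)
 * keeps this correct even when the caller already had IRQs disabled.
 */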

#ifdef CONFIG_SMP
#error SMP not supported

#define smp_mb()                mb()
#define smp_rmb()               rmb()
#define smp_wmb()               wmb()
#define smp_read_barrier_depends()              read_barrier_depends()

#else

#define smp_mb()                barrier()
#define smp_rmb()               barrier()
#define smp_wmb()               barrier()
#define smp_read_barrier_depends()              do { } while(0)

#define clf()                   __clf()
#define stf()                   __stf()

#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        flags & PSR_I_BIT;              \
})

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
                case 1:
                        local_irq_save(flags);
                        ret = *(volatile unsigned char *)ptr;
                        *(volatile unsigned char *)ptr = x;
                        local_irq_restore(flags);
                        break;

                case 4:
                        local_irq_save(flags);
                        ret = *(volatile unsigned long *)ptr;
                        *(volatile unsigned long *)ptr = x;
                        local_irq_restore(flags);
                        break;
#else
                case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory", "cc");
                        break;
                case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory", "cc");
                        break;
#endif
                default: __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}

#endif /* CONFIG_SMP */

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif