linux-2.6.git: include/asm-mips/system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

__asm__ (
        ".macro\tlocal_irq_enable\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,0x1f\n\t"
        "xori\t$1,0x1e\n\t"
        "mtc0\t$1,$12\n\t"
        ".set\tpop\n\t"
        ".endm");

static inline void local_irq_enable(void)
{
        __asm__ __volatile__(
                "local_irq_enable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

/*
 * For local_irq_disable() we have to insert nops to make sure that the
 * new value has actually arrived in the status register before the end
 * of this macro.
 * The R4000/R4400 need three nops, the R4600 needs two and the R10000
 * needs no nops at all.
 */
__asm__ (
        ".macro\tlocal_irq_disable\n\t"
        ".set\tpush\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,1\n\t"
        "xori\t$1,1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1,$12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

static inline void local_irq_disable(void)
{
        __asm__ __volatile__(
                "local_irq_disable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

__asm__ (
        ".macro\tlocal_save_flags flags\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        "mfc0\t\\flags, $12\n\t"
        ".set\tpop\n\t"
        ".endm");

#define local_save_flags(x)                                             \
__asm__ __volatile__(                                                   \
        "local_save_flags %0"                                           \
        : "=r" (x))

__asm__ (
        ".macro\tlocal_irq_save result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"
        "ori\t$1, \\result, 1\n\t"
        "xori\t$1, 1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

#define local_irq_save(x)                                               \
__asm__ __volatile__(                                                   \
        "local_irq_save\t%0"                                            \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

__asm__(".macro\tlocal_irq_restore flags\n\t"
        ".set\tnoreorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1, $12\n\t"
        "andi\t\\flags, 1\n\t"
        "ori\t$1, 1\n\t"
        "xori\t$1, 1\n\t"
        "or\t\\flags, $1\n\t"
        "mtc0\t\\flags, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tat\n\t"
        ".set\treorder\n\t"
        ".endm");

#define local_irq_restore(flags)                                        \
do {                                                                    \
        unsigned long __tmp1;                                           \
                                                                        \
        __asm__ __volatile__(                                           \
                "local_irq_restore\t%0"                                 \
                : "=r" (__tmp1)                                         \
                : "0" (flags)                                           \
                : "memory");                                            \
} while(0)

#define irqs_disabled()                                                 \
({                                                                      \
        unsigned long flags;                                            \
        local_save_flags(flags);                                        \
        !(flags & 1);                                                   \
})
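
/*
 * A minimal usage sketch of the interrupt-flag helpers above; the
 * function and variable names are hypothetical and only illustrate the
 * usual save/modify/restore pattern.
 */
static inline void __example_irq_protected_inc(volatile int *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* disable interrupts, keep old state */
        (*counter)++;                   /* section safe from local interrupts */
        local_irq_restore(flags);       /* put the IE bit back as it was      */
}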

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()  do { } while(0)
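
/*
 * A reader-side sketch of the first example above; the names are
 * hypothetical.  The writer is assumed to publish the pointer with a
 * write barrier such as wmb(), defined further down in this file.
 */
static inline int __example_read_published(int **pp)
{
        int *q = *pp;                   /* read the published pointer      */

        read_barrier_depends();         /* order the dependent read below  */
        return *q;                      /* data-dependent read through q   */
}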

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()                                \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                ".set   mips2\n\t"              \
                "sync\n\t"                      \
                ".set   pop"                    \
                : /* no output */               \
                : /* no input */                \
                : "memory")
#else
#define __sync()        do { } while(0)
#endif

#define __fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "nop\n\t"                       \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)CKSEG1)          \
                : "memory")

#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob()                              \
        do {                                    \
                __sync();                       \
                __fast_iob();                   \
        } while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif
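
/*
 * A minimal SMP publication sketch using the barriers above; the data,
 * flag and function names are hypothetical.  The writer stores the
 * payload and then sets a flag; the reader checks the flag and then
 * reads the payload, with smp_wmb()/smp_rmb() keeping the two accesses
 * ordered on both sides.
 */
static inline void __example_publish(volatile int *data, volatile int *flag)
{
        *data = 42;             /* produce the payload first             */
        smp_wmb();              /* order the data store before the flag  */
        *flag = 1;              /* then announce it                      */
}

static inline int __example_consume(volatile int *data, volatile int *flag)
{
        if (!*flag)             /* nothing published yet                 */
                return -1;
        smp_rmb();              /* order the flag read before the data read */
        return *data;
}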

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

/*
 * switch_to(prev, next, last) should switch tasks from prev to next,
 * first checking that next isn't the current task, in which case it
 * does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next, next->thread_info); \
} while(0)

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
        __u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u32\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "ll\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "sc\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " ll\t%0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\tpop"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}

#ifdef CONFIG_MIPS64
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
        __u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u64\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "lld\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "scd\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " lld\t%0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\tpop"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 4:
                        return __xchg_u32(ptr, x);
                case 8:
                        return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
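
/*
 * A minimal sketch of xchg()/tas() used as a crude test-and-set lock;
 * the names are hypothetical and real code would use the spinlock API
 * instead.
 */
static inline void __example_tas_lock(volatile int *lock)
{
        while (tas(lock))               /* atomically set to 1, get old value */
                ;                       /* spin while it was already held     */
}

static inline void __example_tas_unlock(volatile int *lock)
{
        *lock = 0;                      /* release; pair with a barrier if other
                                           stores must be visible beforehand   */
}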

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
        __asm__ __volatile__(
        "       .set    noat                                    \n"
        "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
        "       bne     %0, %z3, 2f                             \n"
        "       move    $1, %z4                                 \n"
        "       sc      $1, %1                                  \n"
        "       beqz    $1, 1b                                  \n"
#ifdef CONFIG_SMP
        "       sync                                            \n"
#endif
        "2:                                                     \n"
        "       .set    at                                      \n"
        : "=&r" (retval), "=m" (*m)
        : "R" (*m), "Jr" (old), "Jr" (new)
        : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}

#ifdef CONFIG_MIPS64
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
        __asm__ __volatile__(
        "       .set    noat                                    \n"
        "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
        "       bne     %0, %z3, 2f                             \n"
        "       move    $1, %z4                                 \n"
        "       scd     $1, %1                                  \n"
        "       beqz    $1, 1b                                  \n"
#ifdef CONFIG_SMP
        "       sync                                            \n"
#endif
        "2:                                                     \n"
        "       .set    at                                      \n"
        : "=&r" (retval), "=m" (*m)
        : "R" (*m), "Jr" (old), "Jr" (new)
        : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
        unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
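
/*
 * A minimal lock-free update sketch built on cmpxchg(); the function
 * name is hypothetical.  The loop retries until no other CPU changed
 * the value between the read and the compare-and-exchange.
 */
static inline int __example_atomic_add(volatile int *v, int delta)
{
        int old, new;

        do {
                old = *v;               /* snapshot the current value     */
                new = old + delta;      /* compute the desired new value  */
        } while (cmpxchg(v, old, new) != old);  /* retry if someone raced us */

        return new;
}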

extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);

#define die(msg, regs)                                                  \
        __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)                                        \
        __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
        return serial_console ? 0 : 1;
}

/*
 * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define prepare_arch_switch(rq, next)           \
do {                                            \
        spin_lock(&(next)->switch_lock);        \
        spin_unlock(&(rq)->lock);               \
} while (0)
#define finish_arch_switch(rq, prev)    spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)             ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

#endif /* _ASM_SYSTEM_H */