VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / include/asm-mips/system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>
#include <asm/hazards.h>

__asm__ (
        ".macro\tlocal_irq_enable\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,0x1f\n\t"
        "xori\t$1,0x1e\n\t"
        "mtc0\t$1,$12\n\t"
        "irq_enable_hazard\n\t"
        ".set\tpop\n\t"
        ".endm");

static inline void local_irq_enable(void)
{
        __asm__ __volatile__(
                "local_irq_enable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
__asm__ (
        ".macro\tlocal_irq_disable\n\t"
        ".set\tpush\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,1\n\t"
        "xori\t$1,1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1,$12\n\t"
        "irq_disable_hazard\n\t"
        ".set\tpop\n\t"
        ".endm");

static inline void local_irq_disable(void)
{
        __asm__ __volatile__(
                "local_irq_disable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

__asm__ (
        ".macro\tlocal_save_flags flags\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        "mfc0\t\\flags, $12\n\t"
        ".set\tpop\n\t"
        ".endm");

#define local_save_flags(x)                                             \
__asm__ __volatile__(                                                   \
        "local_save_flags %0"                                           \
        : "=r" (x))

__asm__ (
        ".macro\tlocal_irq_save result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"
        "ori\t$1, \\result, 1\n\t"
        "xori\t$1, 1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        "irq_disable_hazard\n\t"
        ".set\tpop\n\t"
        ".endm");

#define local_irq_save(x)                                               \
__asm__ __volatile__(                                                   \
        "local_irq_save\t%0"                                            \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

__asm__ (
        ".macro\tlocal_irq_restore flags\n\t"
        ".set\tnoreorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1, $12\n\t"
        "andi\t\\flags, 1\n\t"
        "ori\t$1, 1\n\t"
        "xori\t$1, 1\n\t"
        "or\t\\flags, $1\n\t"
        "mtc0\t\\flags, $12\n\t"
        "irq_disable_hazard\n\t"
        ".set\tat\n\t"
        ".set\treorder\n\t"
        ".endm");

#define local_irq_restore(flags)                                        \
do {                                                                    \
        unsigned long __tmp1;                                           \
                                                                        \
        __asm__ __volatile__(                                           \
                "local_irq_restore\t%0"                                 \
                : "=r" (__tmp1)                                         \
                : "0" (flags)                                           \
                : "memory");                                            \
} while(0)

#define irqs_disabled()                                                 \
({                                                                      \
        unsigned long flags;                                            \
        local_save_flags(flags);                                        \
        !(flags & 1);                                                   \
})

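/*
 * Illustrative sketch (not part of the original header): a typical
 * interrupt-safe critical section built on the macros above.  The helper
 * and its counter argument are hypothetical; the point is only the
 * pairing of local_irq_save(), which stashes the old Status register
 * value, with local_irq_restore(), which puts the IE bit back.
 */
static inline void __sample_protected_increment(unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask interrupts, remember old IE bit */
        (*counter)++;                   /* cannot be interrupted on this CPU    */
        local_irq_restore(flags);       /* restore the previous interrupt state */
}
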
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()  do { } while(0)

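/*
 * Illustrative sketch (not part of the original header): the consumer side
 * of the pointer-publication pattern described in the comment above.  The
 * structure, the extern pointer and the helper are hypothetical; the
 * producer is assumed to have initialised the object and published the
 * pointer behind a write barrier (see the smp_* sketch further down).
 */
struct __sample_node {
        int data;
};

extern struct __sample_node *__sample_head;

static inline int __sample_read_published(void)
{
        struct __sample_node *p = __sample_head;        /* load the pointer */

        read_barrier_depends();         /* order the dependent read ...     */
        return p ? p->data : -1;        /* ... of the pointed-to data       */
}
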
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()                                \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                ".set   mips2\n\t"              \
                "sync\n\t"                      \
                ".set   pop"                    \
                : /* no output */               \
                : /* no input */                \
                : "memory")
#else
#define __sync()        do { } while(0)
#endif

#define __fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "nop\n\t"                       \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)CKSEG1)          \
                : "memory")

#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob()                              \
        do {                                    \
                __sync();                       \
                __fast_iob();                   \
        } while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

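/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * memory-mapped register write followed by iob().  Depending on the
 * configuration above, iob() either flushes the write buffer (wbflush())
 * or issues a sync plus an uncached load, so the store has left the CPU
 * before the function returns.
 */
static inline void __sample_mmio_write(volatile unsigned int *reg, unsigned int val)
{
        *reg = val;     /* post the store to the device register    */
        iob();          /* wait until it is actually out on the bus */
}
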
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

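/*
 * Illustrative sketch (not part of the original header): a flag-based
 * producer/consumer pair using the SMP barriers above.  All names are
 * hypothetical.  There is no data dependency between the flag and the
 * payload, so the reader needs smp_rmb() rather than
 * read_barrier_depends(), matching the second example in the big comment
 * earlier in this file.
 */
extern int __sample_payload;
extern int __sample_ready;

static inline void __sample_produce(int value)
{
        __sample_payload = value;       /* write the data ...                  */
        smp_wmb();                      /* ... then make the flag visible last */
        __sample_ready = 1;
}

static inline int __sample_try_consume(int *value)
{
        if (!__sample_ready)
                return 0;
        smp_rmb();                      /* flag seen set: data is now safe */
        *value = __sample_payload;
        return 1;
}
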
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next, next->thread_info); \
} while(0)

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
        __u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u32\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "ll\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "sc\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " ll\t%0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\tpop"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}

#ifdef CONFIG_MIPS64
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
        __u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u64\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "lld\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "scd\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " lld\t%0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\tpop"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 4:
                        return __xchg_u32(ptr, x);
                case 8:
                        return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

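/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set lock built on xchg().  The lock word and both helpers are
 * hypothetical; real code would use the arch spinlock implementation.
 * This only demonstrates that xchg() returns the previous value of the
 * location being swapped.
 */
static inline void __sample_lock(volatile int *lock)
{
        while (xchg(lock, 1) != 0)      /* previous value 0 means we got it */
                ;                       /* otherwise spin until it is freed */
}

static inline void __sample_unlock(volatile int *lock)
{
        smp_mb();                       /* order the critical section ...   */
        *lock = 0;                      /* ... before releasing the lock    */
}
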
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
        __asm__ __volatile__(
        "       .set    noat                                    \n"
        "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
        "       bne     %0, %z3, 2f                             \n"
        "       move    $1, %z4                                 \n"
        "       sc      $1, %1                                  \n"
        "       beqz    $1, 1b                                  \n"
#ifdef CONFIG_SMP
        "       sync                                            \n"
#endif
        "2:                                                     \n"
        "       .set    at                                      \n"
        : "=&r" (retval), "=m" (*m)
        : "R" (*m), "Jr" (old), "Jr" (new)
        : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}

#ifdef CONFIG_MIPS64
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
        __asm__ __volatile__(
        "       .set    noat                                    \n"
        "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
        "       bne     %0, %z3, 2f                             \n"
        "       move    $1, %z4                                 \n"
        "       scd     $1, %1                                  \n"
        "       beqz    $1, 1b                                  \n"
#ifdef CONFIG_SMP
        "       sync                                            \n"
#endif
        "2:                                                     \n"
        "       .set    at                                      \n"
        : "=&r" (retval), "=m" (*m)
        : "R" (*m), "Jr" (old), "Jr" (new)
        : "memory");
#else
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier  */
#endif

        return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
        unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

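/*
 * Illustrative sketch (not part of the original header): a lock-free add
 * built on cmpxchg().  The helper is hypothetical; it simply retries until
 * the compare-and-exchange observes an unchanged old value, which is the
 * usual way cmpxchg() is turned into an arbitrary atomic update.
 */
static inline int __sample_atomic_add(volatile int *v, int delta)
{
        int old, new;

        do {
                old = *v;               /* snapshot the current value    */
                new = old + delta;      /* compute the desired new value */
        } while (cmpxchg(v, old, new) != old);  /* retry if someone else updated v */

        return new;
}
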
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);

#define die(msg, regs)                                                  \
        __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)                                        \
        __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
        return serial_console ? 0 : 1;
}

/*
 * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define prepare_arch_switch(rq, next)           \
do {                                            \
        spin_lock(&(next)->switch_lock);        \
        spin_unlock(&(rq)->lock);               \
} while (0)
#define finish_arch_switch(rq, prev)    spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)             ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

#endif /* _ASM_SYSTEM_H */