/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>
#include <asm/hazards.h>
25 ".macro\tlocal_irq_enable\n\t"
33 "irq_enable_hazard\n\t"
37 static inline void local_irq_enable(void)

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
54 ".macro\tlocal_irq_disable\n\t"
62 "irq_disable_hazard\n\t"
66 static inline void local_irq_disable(void)
76 ".macro\tlocal_save_flags flags\n\t"
79 "mfc0\t\\flags, $12\n\t"

#define local_save_flags(x) \
__asm__ __volatile__( \
	"local_save_flags %0" \
	: "=r" (x))
89 ".macro\tlocal_irq_save result\n\t"
93 "mfc0\t\\result, $12\n\t"
94 "ori\t$1, \\result, 1\n\t"
98 "irq_disable_hazard\n\t"

#define local_irq_save(x) \
__asm__ __volatile__( \
	"local_irq_save\t%0" \
	: "=r" (x) \
	: /* no inputs */ \
	: "memory")
110 ".macro\tlocal_irq_restore flags\n\t"
111 ".set\tnoreorder\n\t"
114 "andi\t\\flags, 1\n\t"
117 "or\t\\flags, $1\n\t"
118 "mtc0\t\\flags, $12\n\t"
119 "irq_disable_hazard\n\t"

#define local_irq_restore(flags) \
do { \
	unsigned long __tmp1; \
	__asm__ __volatile__( \
		"local_irq_restore\t%0" \
		: "=r" (__tmp1) \
		: "0" (flags) \
		: "memory"); \
} while(0)

#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	!(flags & 1); \
})
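
/*
 * A minimal usage sketch (illustrative only; the function and variable
 * names below are made up, not part of this header). The flags word
 * filled in by local_irq_save() must be handed back to the matching
 * local_irq_restore() so the previous interrupt state is reinstated:
 *
 *	static void example_update(unsigned int *counter)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);		// mask interrupts, remember old state
 *		(*counter)++;			// not interrupted on this CPU
 *		local_irq_restore(flags);	// put the old IE state back
 *	}
 *
 * irqs_disabled() just tests bit 0 (the IE bit of the CP0 Status
 * register) of the value captured by local_save_flags().
 */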

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb() itself.
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends() do { } while(0)
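
/*
 * The pointer-publication pattern above, written out as C functions (a
 * sketch only; "writer", "reader" and the variables are illustrative and
 * not part of this header):
 *
 *	static int a = 0, b = 1;
 *	static int *p = &a;
 *
 *	static void writer(void)
 *	{
 *		b = 2;
 *		wmb();			// publish b before publishing the pointer
 *		p = &b;
 *	}
 *
 *	static int reader(void)
 *	{
 *		int *q = p;
 *		read_barrier_depends();	// order the load of p before the load of *q
 *		return *q;		// sees 2 once q observes &b
 *	}
 */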

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \

#else
#define __sync() do { } while(0)
#endif

#define __fast_iob() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		: "m" (*(int *)CKSEG1) \

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP

#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
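
/*
 * A producer/consumer sketch for the barriers above (illustrative only;
 * "buf", "ready" and the functions are made-up names). The producer
 * orders its data ahead of the flag with wmb(); the consumer orders the
 * flag ahead of the data with rmb(). set_wmb(var, value) is simply
 * shorthand for "var = value; wmb();".
 *
 *	static int buf;
 *	static volatile int ready;
 *
 *	static void producer(int value)
 *	{
 *		buf = value;
 *		wmb();			// data becomes visible before the flag
 *		ready = 1;
 *	}
 *
 *	static int consumer(void)
 *	{
 *		while (!ready)
 *			;		// spin until the producer sets the flag
 *		rmb();			// flag read ordered before the data read
 *		return buf;
 *	}
 */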

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next, next->thread_info); \
} while(0)

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
#ifdef CONFIG_CPU_HAS_LLSC
	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u32\n\t"
		".set\tnoreorder\n\t"
		"1:\tmove\t%2, %z4\n\t"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)

	local_irq_save(flags);
	local_irq_restore(flags);	/* implies memory barrier */

static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
#ifdef CONFIG_CPU_HAS_LLDSCD
	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u64\n\t"
		".set\tnoreorder\n\t"
		"1:\tmove\t%2, %z4\n\t"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)

	local_irq_save(flags);
	local_irq_restore(flags);	/* implies memory barrier */

extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
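
/*
 * xchg()/tas() usage sketch (illustrative only; "lock_word" and the
 * helpers are made-up names). tas(ptr) atomically writes 1 and returns
 * the previous value, which is the classic test-and-set primitive:
 *
 *	static volatile int lock_word;
 *
 *	static void example_lock(void)
 *	{
 *		while (tas(&lock_word))
 *			;		// previous value was 1: still held elsewhere
 *	}
 *
 *	static void example_unlock(void)
 *	{
 *		xchg(&lock_word, 0);	// atomically drop the lock
 *	}
 */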

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
#ifdef CONFIG_CPU_HAS_LLSC
	__asm__ __volatile__(
		"1: ll %0, %2 # __cmpxchg_u32 \n"
		" bne %0, %z3, 2f \n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)

	local_irq_save(flags);
	local_irq_restore(flags);	/* implies memory barrier */

static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
#ifdef CONFIG_CPU_HAS_LLDSCD
	__asm__ __volatile__(
		"1: lld %0, %2 # __cmpxchg_u64 \n"
		" bne %0, %z3, 2f \n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)

	local_irq_save(flags);
	local_irq_restore(flags);	/* implies memory barrier */

extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
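
/*
 * cmpxchg() usage sketch (illustrative only; "counter" and example_add()
 * are made-up names). The compare-and-exchange loop retries until no
 * other CPU has modified the word between the read and the update:
 *
 *	static volatile int counter;
 *
 *	static void example_add(int delta)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = counter;
 *			new = old + delta;
 *		} while (cmpxchg(&counter, old, new) != old);
 *	}
 */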

extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs) \
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs) \
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/*
 * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define prepare_arch_switch(rq, next) \
do { \
	spin_lock(&(next)->switch_lock); \
	spin_unlock(&(rq)->lock); \
} while (0)
#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

#endif /* _ASM_SYSTEM_H */