/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

24 ".macro\tlocal_irq_enable\n\t"
35 static inline void local_irq_enable(void)
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
52 ".macro\tlocal_irq_disable\n\t"
60 "sll\t$0, $0, 1\t\t\t# nop\n\t"
61 "sll\t$0, $0, 1\t\t\t# nop\n\t"
62 "sll\t$0, $0, 1\t\t\t# nop\n\t"
66 static inline void local_irq_disable(void)
76 ".macro\tlocal_save_flags flags\n\t"
79 "mfc0\t\\flags, $12\n\t"
83 #define local_save_flags(x) \
84 __asm__ __volatile__( \
85 "local_save_flags %0" \
89 ".macro\tlocal_irq_save result\n\t"
93 "mfc0\t\\result, $12\n\t"
94 "ori\t$1, \\result, 1\n\t"
98 "sll\t$0, $0, 1\t\t\t# nop\n\t"
99 "sll\t$0, $0, 1\t\t\t# nop\n\t"
100 "sll\t$0, $0, 1\t\t\t# nop\n\t"
104 #define local_irq_save(x) \
105 __asm__ __volatile__( \
106 "local_irq_save\t%0" \
__asm__(".macro\tlocal_irq_restore flags\n\t"
	".set\tnoreorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1, $12\n\t"
	"andi\t\\flags, 1\n\t"
	"ori\t$1, 1\n\t"
	"xori\t$1, 1\n\t"
	"or\t\\flags, $1\n\t"
	"mtc0\t\\flags, $12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tat\n\t"
	".set\treorder\n\t"
	".endm");

#define local_irq_restore(flags)					\
do {									\
	unsigned long __tmp1;						\
									\
	__asm__ __volatile__(						\
		"local_irq_restore\t%0"					\
		: "=r" (__tmp1)						\
		: "0" (flags)						\
		: "memory");						\
} while(0)

#define irqs_disabled()							\
({									\
	unsigned long flags;						\
	local_save_flags(flags);					\
	!(flags & 1);							\
})

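/*
 * Usage sketch (illustrative, not part of the original interface): the
 * save/restore forms nest safely, so they are the right choice when the
 * caller may already be running with interrupts disabled:
 *
 * <programlisting>
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section, no local interrupts ...
 *	local_irq_restore(flags);
 * </programlisting>
 *
 * A bare local_irq_enable() in place of local_irq_restore() would
 * wrongly re-enable interrupts for a caller that entered with them
 * disabled.
 */
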
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while(0)

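/*
 * A concrete C sketch of the pointer-publication pattern above (the
 * names are illustrative, not part of this header): the writer fills in
 * an object, orders its stores with smp_wmb(), then publishes the
 * pointer; the reader orders the dependent load with
 * read_barrier_depends():
 *
 * <programlisting>
 *	struct foo { int a; };
 *	struct foo *global_p;
 *
 *	writer:				reader:
 *	p->a = 42;			q = global_p;
 *	smp_wmb();			read_barrier_depends();
 *	global_p = p;			if (q) val = q->a;
 * </programlisting>
 *
 * On MIPS the macro expands to nothing because the CPU honours data
 * dependencies; it documents the dependency for architectures such as
 * Alpha where a real barrier must be emitted.
 */
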
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

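/*
 * Usage sketch (illustrative; MY_DEV_CTRL and START are hypothetical
 * names, not defined anywhere): when a device register write must reach
 * the device before the CPU touches it again, follow the write with
 * iob(), after which it is safe to poll the device's status register:
 *
 * <programlisting>
 *	*(volatile unsigned int *)MY_DEV_CTRL = START;
 *	iob();
 * </programlisting>
 */
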
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

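/*
 * For example, set_mb(data_ready, 1) stores the flag and then issues a
 * full barrier, ordering the store before any later memory access from
 * this CPU; the usual pattern writes the data first and then publishes
 * a "ready" flag this way ("data_ready" is an illustrative name).
 */
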
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next, next->thread_info); \
} while(0)

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u32\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"ll\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"sc\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" ll\t%0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\tpop"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}

#ifdef CONFIG_MIPS64
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u64\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"lld\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"scd\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" lld\t%0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\tpop"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

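/*
 * Usage sketch (illustrative, not part of this header): xchg() returns
 * the old value atomically, which is enough to build a crude
 * test-and-set lock; the loop spins while the old value was already 1:
 *
 * <programlisting>
 *	static volatile int lock_word;
 *
 *	while (tas(&lock_word))
 *		;
 *	... critical section ...
 *	lock_word = 0;
 * </programlisting>
 *
 * Real code should use the spinlock API (which also handles the
 * barriers on release); this only shows what the primitive provides.
 */
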
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
	__asm__ __volatile__(
	"	.set	noat					\n"
	"1:	ll	%0, %2			# __cmpxchg_u32	\n"
	"	bne	%0, %z3, 2f				\n"
	"	move	$1, %z4					\n"
	"	sc	$1, %1					\n"
	"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
	"	sync						\n"
#endif
	"2:							\n"
	"	.set	at					\n"
	: "=&r" (retval), "=m" (*m)
	: "R" (*m), "Jr" (old), "Jr" (new)
	: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}

#ifdef CONFIG_MIPS64
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
	__asm__ __volatile__(
	"	.set	noat					\n"
	"1:	lld	%0, %2			# __cmpxchg_u64	\n"
	"	bne	%0, %z3, 2f				\n"
	"	move	$1, %z4					\n"
	"	scd	$1, %1					\n"
	"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
	"	sync						\n"
#endif
	"2:							\n"
	"	.set	at					\n"
	: "=&r" (retval), "=m" (*m)
	: "R" (*m), "Jr" (old), "Jr" (new)
	: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

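/*
 * Usage sketch (illustrative, not part of this header): cmpxchg() only
 * stores "new" if the current value still equals "old", and returns the
 * value it found, so a lock-free read-modify-write retries until no
 * other CPU intervened:
 *
 * <programlisting>
 *	int atomic_add_sketch(volatile int *v, int inc)
 *	{
 *		int old;
 *
 *		do {
 *			old = *v;
 *		} while (cmpxchg(v, old, old + inc) != old);
 *		return old;
 *	}
 * </programlisting>
 */
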
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs)							\
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)					\
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/*
 * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define prepare_arch_switch(rq, next)		\
do {						\
	spin_lock(&(next)->switch_lock);	\
	spin_unlock(&(rq)->lock);		\
} while (0)
#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

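/*
 * Note the scheme: a task's switch_lock is taken in
 * prepare_arch_switch() when it is switched in and only released by
 * finish_arch_switch() once it has been completely switched out, which
 * is why task_running() also checks spin_is_locked(): another CPU will
 * not start running the task until its register state has been saved.
 * Dropping rq->lock before the low-level switch is what avoids the SMP
 * deadlock mentioned above.
 */
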
#endif /* _ASM_SYSTEM_H */