/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
9 #include <linux/config.h>
10 #include <linux/threads.h> /* For NR_CPUS */
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
32 #ifndef CONFIG_DEBUG_SPINLOCK
/* Non-debug build: a spinlock is a single byte; 0 means unlocked,
 * non-zero means held.
 * NOTE(review): the embedded numbering in this listing shows gaps,
 * so some original source lines are missing here. */
34 typedef unsigned char spinlock_t;
35 #define SPIN_LOCK_UNLOCKED 0
/* Init is a plain byte store; the volatile cast in the query forces a
 * real load each time it is evaluated. */
37 #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
38 #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
/* Read-only spin until the holder clears the byte; the #LoadLoad
 * membar orders each re-read after prior loads (needed under RMO,
 * per the header comment above). */
40 #define spin_unlock_wait(lock) \
41 do { membar("#LoadLoad"); \
42 } while(*((volatile unsigned char *)lock))
/* Acquire the lock.
 * "ldstub" atomically sets the lock byte to 0xff and returns the old
 * value in %g7; the membar publishes the acquisition before any later
 * memory accesses.  The "2:" ldub is the read-only spin on contention.
 * NOTE(review): several lines of this asm body (branches, the spin
 * loop back-edge, and the function braces) are missing from this
 * listing — only fragments are visible. */
44 static __inline__ void _raw_spin_lock(spinlock_t *lock)
47 "1: ldstub [%0], %%g7\n"
49 " membar #StoreLoad | #StoreStore\n"
51 "2: ldub [%0], %%g7\n"
/* Single ldstub attempt, no spinning: the previous byte value lands
 * in %0 (0 on success, non-zero if already held).
 * NOTE(review): the lines deriving the int return value and closing
 * the function are missing from this listing. */
61 static __inline__ int _raw_spin_trylock(spinlock_t *lock)
64 __asm__ __volatile__("ldstub [%1], %0\n\t"
65 "membar #StoreLoad | #StoreStore"
/* Release: #StoreStore | #LoadStore orders all accesses inside the
 * critical section before the unlocking store.
 * NOTE(review): the store that actually clears the lock byte is on a
 * line missing from this listing. */
72 static __inline__ void _raw_spin_unlock(spinlock_t *lock)
74 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
/* Out-of-line acquire that also takes the saved IRQ flags; defined
 * elsewhere in the arch code.  Presumably it can restore interrupts
 * while spinning — verify against its definition. */
81 extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
83 #else /* !(CONFIG_DEBUG_SPINLOCK) */
/* Debug build: the lock also records the owner's PC and CPU for
 * deadlock diagnostics.
 * NOTE(review): the struct's opening "typedef struct {" and the lock
 * byte field itself are on lines missing from this listing. */
87 unsigned int owner_pc, owner_cpu;
/* owner_cpu == 0xff marks "no owner". */
89 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
/* NOTE(review): the "} while(0)" closer of spin_lock_init is on a
 * missing line. */
90 #define spin_lock_init(__lock) \
91 do { (__lock)->lock = 0; \
92 (__lock)->owner_pc = 0; \
93 (__lock)->owner_cpu = 0xff; \
95 #define spin_is_locked(__lock) (*((volatile unsigned char *)(&((__lock)->lock))) != 0)
/* Spin until ->lock reads zero; #LoadLoad orders the re-reads.
 * NOTE(review): the "do {" opener of this macro is on a missing line. */
96 #define spin_unlock_wait(__lock) \
98 membar("#LoadLoad"); \
99 } while(*((volatile unsigned char *)(&((__lock)->lock))))
/* Debug implementations live out of line (defined elsewhere); the
 * string argument names the caller's operation for diagnostics. */
101 extern void _do_spin_lock (spinlock_t *lock, char *str);
102 extern void _do_spin_unlock (spinlock_t *lock);
103 extern int _spin_trylock (spinlock_t *lock);
/* Map the generic _raw_* API onto the debug helpers. */
105 #define _raw_spin_trylock(lp) _spin_trylock(lp)
106 #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock")
107 #define _raw_spin_unlock(lock) _do_spin_unlock(lock)
/* The flags variant gets no special treatment in the debug case. */
108 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
110 #endif /* CONFIG_DEBUG_SPINLOCK */
112 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
114 #ifndef CONFIG_DEBUG_SPINLOCK
/* Non-debug rwlock: one word; 0 == unlocked. */
116 typedef unsigned int rwlock_t;
117 #define RW_LOCK_UNLOCKED 0
118 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
119 #define rwlock_is_locked(x) (*(x) != RW_LOCK_UNLOCKED)
/* The real lock/unlock primitives are out of line, defined elsewhere
 * in the arch code. */
121 extern void __read_lock(rwlock_t *);
122 extern void __read_unlock(rwlock_t *);
123 extern void __write_lock(rwlock_t *);
124 extern void __write_unlock(rwlock_t *);
125 extern int __write_trylock(rwlock_t *);
/* Generic _raw_* entry points forward straight to the helpers. */
127 #define _raw_read_lock(p) __read_lock(p)
128 #define _raw_read_unlock(p) __read_unlock(p)
129 #define _raw_write_lock(p) __write_lock(p)
130 #define _raw_write_unlock(p) __write_unlock(p)
131 #define _raw_write_trylock(p) __write_trylock(p)
133 #else /* !(CONFIG_DEBUG_SPINLOCK) */
/* Debug rwlock: records the writer's PC/CPU and, per CPU, the PC of
 * the last reader.
 * NOTE(review): the struct's opening "typedef struct {" and the lock
 * word itself are on lines missing from this listing. */
137 unsigned int writer_pc, writer_cpu;
138 unsigned int reader_pc[NR_CPUS];
/* writer_cpu == 0xff marks "no writer". */
140 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } }
141 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
142 #define rwlock_is_locked(x) ((x)->lock != 0)
/* Debug implementations live out of line; the string names the
 * operation for diagnostics. */
144 extern void _do_read_lock(rwlock_t *rw, char *str);
145 extern void _do_read_unlock(rwlock_t *rw, char *str);
146 extern void _do_write_lock(rwlock_t *rw, char *str);
147 extern void _do_write_unlock(rwlock_t *rw);
148 extern int _do_write_trylock(rwlock_t *rw, char *str);
/* Each wrapper disables local interrupts around the debug helper so
 * its bookkeeping cannot be torn by an interrupt on this CPU.
 * NOTE(review): the closing lines of every macro below — the
 * "} while(0)" terminators, the "int val;" declaration, and the
 * "val; })" tail of _raw_write_trylock — are on lines missing from
 * this listing. */
150 #define _raw_read_lock(lock) \
151 do { unsigned long flags; \
152 local_irq_save(flags); \
153 _do_read_lock(lock, "read_lock"); \
154 local_irq_restore(flags); \
157 #define _raw_read_unlock(lock) \
158 do { unsigned long flags; \
159 local_irq_save(flags); \
160 _do_read_unlock(lock, "read_unlock"); \
161 local_irq_restore(flags); \
164 #define _raw_write_lock(lock) \
165 do { unsigned long flags; \
166 local_irq_save(flags); \
167 _do_write_lock(lock, "write_lock"); \
168 local_irq_restore(flags); \
171 #define _raw_write_unlock(lock) \
172 do { unsigned long flags; \
173 local_irq_save(flags); \
174 _do_write_unlock(lock); \
175 local_irq_restore(flags); \
178 #define _raw_write_trylock(lock) \
179 ({ unsigned long flags; \
181 local_irq_save(flags); \
182 val = _do_write_trylock(lock, "write_trylock"); \
183 local_irq_restore(flags); \
187 #endif /* CONFIG_DEBUG_SPINLOCK */
189 #endif /* !(__ASSEMBLY__) */
191 #endif /* !(__SPARC64_SPINLOCK_H) */