#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * An unsigned int is used for the lock word, as a full 64-bit word is
 * not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/paca.h>	/* struct paca_struct, for the per-cpu lock_token */

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	/* lwsync orders all prior loads/stores before the releasing store */
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}
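
/*
 * Usage sketch (an illustrative addition, not from the original
 * header): code never calls the _raw_* primitives directly; it uses
 * the generic wrappers from <linux/spinlock.h>, which on SMP map onto
 * _raw_spin_lock()/_raw_spin_unlock().  `my_lock' is a made-up name:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	...critical section; must not sleep...
 *	spin_unlock(&my_lock);
 */
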
/*
 * Normally we use the spinlock functions in arch/ppc64/lib/locks.c.
 * For special applications such as profiling, we can have the
 * spinlock functions inline by defining CONFIG_SPINLINE.
 * This is not recommended on partitioned systems with shared
 * processors, since the inline spinlock functions don't include
 * the code for yielding the CPU to the lock holder.
 */

#ifndef CONFIG_SPINLINE
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
extern void spin_unlock_wait(spinlock_t *lock);

#else

/*
 * Atomically try to take the lock: if the lock word is seen to be 0,
 * store this cpu's lock token (loaded from the paca, which r13 points
 * to) with a lwarx/stwcx. pair.  Returns nonzero on success.
 */
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int tmp, tmp2;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# spin_trylock\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	lwz		%1,%3(13)\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock), "i"(offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp == 0;
}
/*
 * Spin on an ordinary load at low SMT priority until the lock looks
 * free, then attempt the lwarx/stwcx. acquire.  HMT_LOW/HMT_MEDIUM
 * are the thread-priority hint strings from <asm/processor.h>.
 */
static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# spin_lock\n\
1:"
	HMT_LOW
"	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	lwz		%0,%2(13)\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock), "i"(offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");
}

/*
 * Note: if we ever want to inline the spinlocks on iSeries,
 * we will have to change the irq enable/disable stuff in here.
 *
 * While the lock is busy, this version restores the caller's MSR (the
 * saved irq state in `flags') so that interrupts can be serviced while
 * we spin, then disables interrupts again before retrying the acquire.
 */
static __inline__ void _raw_spin_lock_flags(spinlock_t *lock,
					    unsigned long flags)
{
	unsigned int tmp;
	unsigned long tmp2;

	__asm__ __volatile__(
	"b		3f		# spin_lock\n\
1:	mfmsr		%1\n\
	mtmsrd		%3,1\n\
2:"	HMT_LOW
"	lwzx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne+		2b\n"
	HMT_MEDIUM
"	mtmsrd		%1,1\n\
3:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	lwz		%1,%4(13)\n\
	stwcx.		%1,0,%2\n\
	bne-		3b\n\
	isync"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock), "r"(flags),
	  "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");
}

#define spin_unlock_wait(x)	do { cpu_relax(); } while (spin_is_locked(x))
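
/*
 * Note (illustrative addition): spin_unlock_wait() never takes the
 * lock; it only spins until the lock is observed free, e.g.
 *
 *	spin_unlock_wait(&my_lock);
 *
 * returns once no CPU is seen holding my_lock.  The out-of-line
 * version in arch/ppc64/lib/locks.c can additionally yield the
 * processor to the lock holder, as described above.
 */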

#endif /* CONFIG_SPINLINE */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks; see the usage sketch below the rwlock definitions.
 */
typedef struct {
	volatile signed int lock;
} rwlock_t;

/*
 * The signed lock word encodes the state: 0 is unlocked, a positive
 * value is the number of active readers, and -1 means write-locked.
 */
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)	((x)->lock)
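
/*
 * Usage sketch (illustrative addition) for the irq-mixing pattern
 * described above, using the generic <linux/spinlock.h> wrappers.
 * The writer must disable interrupts, since a reader may run in irq
 * context; the readers themselves need no irq protection.  The names
 * are made up:
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	Writer (process context):
 *		unsigned long flags;
 *		write_lock_irqsave(&my_rwlock, flags);
 *		...modify the shared data...
 *		write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	Reader (may be an interrupt handler):
 *		read_lock(&my_rwlock);
 *		...read the shared data...
 *		read_unlock(&my_rwlock);
 */
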
static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->lock < 0;
}

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	/* lwsync orders the critical section before the releasing store */
	__asm__ __volatile__("lwsync		# write_unlock": : :"memory");
	rw->lock = 0;
}

#ifndef CONFIG_SPINLINE
extern int _raw_read_trylock(rwlock_t *rw);
extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);
extern void _raw_write_unlock(rwlock_t *rw);

#else

/*
 * Try to take a read lock: sign-extend the 32-bit lock word and
 * atomically increment it; fail if the result is <= 0 (i.e. the lock
 * was write-locked).  Returns nonzero on success.
 */
static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	unsigned int ret;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# read_trylock\n\
	li		%1,0\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return ret;
}

static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# read_lock\n\
1:"
	HMT_LOW
"	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	blt+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		1b\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	/* lwsync orders the critical section before the decrement */
	__asm__ __volatile__(
	"lwsync				# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	unsigned int ret;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# write_trylock\n\
	cmpwi		0,%0,0\n\
	li		%1,0\n\
	bne-		2f\n\
	stwcx.		%3,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	return ret;
}

static __inline__ void _raw_write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# write_lock\n\
1:"
	HMT_LOW
"	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	stwcx.		%2,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}

#endif /* CONFIG_SPINLINE */

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */