#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
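
/*
 * Typical use is via the generic wrappers in <linux/spinlock.h>;
 * a minimal sketch (my_lock is a hypothetical caller's lock, not
 * part of this header):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */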

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	/* order all stores in the critical section before the store
	 * that drops the lock */
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}
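
/*
 * The lwsync above plus the plain store gives the unlock release
 * semantics; roughly, in C11-style pseudo-code (illustration only,
 * not how the kernel spells it):
 *
 *	atomic_store_explicit(&lock->lock, 0, memory_order_release);
 */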

/*
 * Normally we use the spinlock functions in arch/ppc64/lib/locks.c.
 * For special applications such as profiling, we can have the
 * spinlock functions inline by defining CONFIG_SPINLINE.
 * This is not recommended on partitioned systems with shared
 * processors, since the inline spinlock functions don't include
 * the code for yielding the CPU to the lock holder.
 */

#ifndef CONFIG_SPINLINE
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
extern void spin_unlock_wait(spinlock_t *lock);

#else

static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int tmp, tmp2;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# spin_trylock\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	li		%1,1\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock)
	: "cr0", "memory");

	return tmp == 0;
}
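
/*
 * lwarx/stwcx. above form a load-linked/store-conditional pair: the
 * store succeeds only if no other CPU has written the lock word since
 * the lwarx established the reservation, and the trailing isync keeps
 * the critical section from starting early.  In pseudo-C (illustration
 * only; load_reserved()/store_conditional() are not real helpers):
 *
 *	do {
 *		old = load_reserved(&lock->lock);	// lwarx
 *		if (old != 0)
 *			return 0;			// already held
 *	} while (!store_conditional(&lock->lock, 1));	// stwcx.
 *	return 1;
 */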

static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	/* spin on a plain load (1:) until the lock looks free, then
	 * race for it with lwarx/stwcx. (2:) */
	__asm__ __volatile__(
	"b		2f		# spin_lock\n\
1:	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n\
2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	stwcx.		%2,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock), "r"(1)
	: "cr0", "memory");
}

/*
 * Note: if we ever want to inline the spinlocks on iSeries,
 * we will have to change the irq enable/disable stuff in here.
 */
static __inline__ void _raw_spin_lock_flags(spinlock_t *lock,
					    unsigned long flags)
{
	unsigned int tmp;
	unsigned long tmp2;

	__asm__ __volatile__(
	"b		3f		# spin_lock\n\
1:	mfmsr		%1		# save MSR, then re-enable irqs\n\
	mtmsrd		%3,1		# from the caller's flags\n\
2:	lwzx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne+		2b\n\
	mtmsrd		%1,1		# irqs off again before we retry\n\
3:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	li		%1,1\n\
	stwcx.		%1,0,%2\n\
	bne-		3b\n\
	isync"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock), "r"(flags)
	: "cr0", "memory");
}

#define spin_unlock_wait(x)	do { cpu_relax(); } while (spin_is_locked(x))

#endif /* CONFIG_SPINLINE */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
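
/*
 * A sketch of that mixing (hypothetical caller, using the generic
 * wrappers from <linux/spinlock.h>; my_rwlock/flags are placeholders):
 *
 *	// interrupt handler: reader only, plain read lock is enough
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 *
 *	// process-context writer must block local interrupts
 *	write_lock_irqsave(&my_rwlock, flags);
 *	...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */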

/*
 * Lock value encoding: 0 means unlocked, a positive value is the
 * number of active readers, and -1 means write-locked.
 */
typedef struct {
	volatile signed int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)	((x)->lock)

static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->lock < 0;
}

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
	rw->lock = 0;
}

#ifndef CONFIG_SPINLINE
extern int _raw_read_trylock(rwlock_t *rw);
extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);
extern void _raw_write_unlock(rwlock_t *rw);

#else

static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
	long tmp;
	int ret;

	/* sign-extend the loaded word so a writer's -1 stays negative
	 * in the 64-bit register before we test it */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# read_trylock\n\
	li		%1,0\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");

	return ret;
}
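
/*
 * Equivalent logic in pseudo-C (illustration only; cmpxchg() here is
 * shorthand, not the kernel helper): atomically bump the reader count
 * unless a writer (lock < 0) holds the lock:
 *
 *	do {
 *		old = rw->lock;
 *		if (old < 0)
 *			return 0;	// writer active
 *	} while (cmpxchg(&rw->lock, old, old + 1) != old);
 *	return 1;
 */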

static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	long tmp;

	/* spin with plain sign-extending loads (1:) while a writer
	 * holds the lock, then try to bump the reader count (2:) */
	__asm__ __volatile__(
	"b		2f		# read_lock\n\
1:	lwax		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	blt+		1b\n\
2:	lwarx		%0,0,%1\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		1b\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	long tmp;

	/* release barrier, then atomically drop the reader count */
	__asm__ __volatile__(
	"lwsync			# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	long tmp;
	int ret;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# write_trylock\n\
	li		%1,0\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%3,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	return ret;
}
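
/*
 * In pseudo-C (illustration only; atomic_cmpxchg() here is shorthand):
 * a writer can take the lock only when it is completely free, and
 * stores -1 so is_write_locked() can tell a writer from readers:
 *
 *	if (atomic_cmpxchg(&rw->lock, 0, -1) == 0)
 *		return 1;	// got the write lock
 *	return 0;
 */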

static __inline__ void _raw_write_lock(rwlock_t *rw)
{
	long tmp;

	/* spin on plain loads (1:) until the lock is free, then race
	 * to store the writer marker -1 with lwarx/stwcx. (2:) */
	__asm__ __volatile__(
	"b		2f		# write_lock\n\
1:	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n\
2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	stwcx.		%2,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}

#endif /* CONFIG_SPINLINE */

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */