1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
5 * Simple spin lock operations.
7 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
8 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
10 * Type of int is used as a full 64b word is not necessary.
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
/*
 * NOTE(review): non-contiguous excerpt -- the enclosing typedef/struct
 * lines for spinlock_t are not visible here.  The lock word is a plain
 * 32-bit unsigned int (per the header comment above, a full 64-bit word
 * is unnecessary); 0 means unlocked, as the initializer and the
 * spin_is_locked() test below show.
 */
18 volatile unsigned int lock;
22 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
24 #define spin_is_locked(x) ((x)->lock != 0)
/*
 * Single non-blocking attempt to take the lock; presumably returns
 * nonzero on success (the usual trylock contract) -- confirm against
 * the full source.  Only the opening lwarx (load-word-and-reserve) of
 * the atomic reservation sequence is visible in this excerpt; the
 * stwcx./branch tail is not.
 */
26 static __inline__ int _raw_spin_trylock(spinlock_t *lock)
31 "1: lwarx %0,0,%1 # spin_trylock\n\
/*
 * Spin until the lock is acquired.  The asm body is mostly missing
 * from this excerpt; the visible input operands pass &lock->lock and
 * the constant 1 -- presumably the "locked" value conditionally stored
 * by stwcx. in the full sequence (TODO confirm).
 */
46 static __inline__ void _raw_spin_lock(spinlock_t *lock)
65 : "r"(&lock->lock), "r"(1)
/*
 * Release the spinlock.  eieio is a PowerPC store-ordering barrier,
 * issued here with a "memory" clobber so critical-section accesses
 * complete before the lock word is cleared; the clearing store itself
 * is not visible in this excerpt.
 */
69 static __inline__ void _raw_spin_unlock(spinlock_t *lock)
71 __asm__ __volatile__("eieio # spin_unlock": : :"memory");
76 * Read-write spinlocks, allowing multiple readers
77 * but only one writer.
79 * NOTE! it is quite common to have readers in interrupts
80 * but no interrupt writers. For those circumstances we
81 * can "mix" irq-safe locks - any writer needs to get a
82 * irq-safe write-lock, but readers can get non-irqsafe
/*
 * NOTE(review): non-contiguous excerpt -- the enclosing typedef/struct
 * lines for rwlock_t are missing.  The count is *signed*: the write
 * routines below pass -1 as the "write-locked" value, so apparently
 * positive values count readers, -1 marks a writer, and 0 is unlocked
 * (confirm against the full source).
 */
86 volatile signed int lock;
89 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
/*
 * Non-blocking read-lock attempt.  This excerpt shows only the lwarx
 * (load-and-reserve) head of the atomic sequence and the earlyclobber
 * output operands (tmp in %0, ret in %1); the increment/stwcx. tail
 * and the return of ret are not visible here.
 */
91 static __inline__ int _raw_read_trylock(rwlock_t *rw)
97 "1: lwarx %0,0,%2 # read_trylock\n\
106 2:" : "=&r"(tmp), "=&r"(ret)
/*
 * Acquire a read lock, spinning as needed.  NOTE(review): the entire
 * asm body falls outside this excerpt.
 */
113 static __inline__ void _raw_read_lock(rwlock_t *rw)
117 __asm__ __volatile__(
/*
 * Drop a read lock.  An eieio ordering barrier opens the asm so prior
 * critical-section accesses complete first; the atomic decrement of
 * rw->lock that presumably follows is not visible in this excerpt.
 */
137 static __inline__ void _raw_read_unlock(rwlock_t *rw)
141 __asm__ __volatile__(
142 "eieio # read_unlock\n\
/*
 * Non-blocking write-lock attempt.  Visible pieces: the lwarx head,
 * earlyclobber outputs (tmp, ret), and inputs &rw->lock and -1 -- the
 * "write-locked" sentinel, presumably stored only when the count is 0
 * (no readers, no writer).  The stwcx. tail and the return of ret are
 * outside this excerpt.
 */
152 static __inline__ int _raw_write_trylock(rwlock_t *rw)
157 __asm__ __volatile__(
158 "1: lwarx %0,0,%2 # write_trylock\n\
166 2:" : "=&r"(tmp), "=&r"(ret)
167 : "r"(&rw->lock), "r"(-1)
/*
 * Acquire the write lock, spinning until rw->lock can be set to -1
 * (the write-locked sentinel passed as the second input operand).
 * The asm opens by branching to a later spin/retry section ("b 2f");
 * most of the body is not visible in this excerpt.
 */
173 static __inline__ void _raw_write_lock(rwlock_t *rw)
177 __asm__ __volatile__(
178 "b 2f # write_lock\n\
192 : "r"(&rw->lock), "r"(-1)
/*
 * Release the write lock.  Mirrors _raw_spin_unlock: an eieio barrier
 * with a "memory" clobber orders critical-section accesses before the
 * store that clears rw->lock (the store itself is not visible in this
 * excerpt).
 */
196 static __inline__ void _raw_write_unlock(rwlock_t *rw)
198 __asm__ __volatile__("eieio # write_unlock": : :"memory");
/*
 * Nonzero if rw is currently read-locked -- presumably a lock > 0 test
 * given the signed reader count; body not visible in this excerpt,
 * confirm against the full source.
 */
202 static __inline__ int is_read_locked(rwlock_t *rw)
/*
 * Nonzero if rw is currently write-locked -- presumably a lock < 0
 * test given the -1 writer sentinel; body not visible in this excerpt,
 * confirm against the full source.
 */
207 static __inline__ int is_write_locked(rwlock_t *rw)
/*
 * Initializers and status helpers.  spin_unlock_wait() busy-waits
 * until the lock is observed free, calling cpu_relax() between polls;
 * being a do/while, cpu_relax() runs at least once before the first
 * test of spin_is_locked().
 */
212 #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
213 #define spin_unlock_wait(x) do { cpu_relax(); } while(spin_is_locked(x))
215 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
217 #define rwlock_is_locked(x) ((x)->lock)
219 #endif /* __KERNEL__ */
220 #endif /* __ASM_SPINLOCK_H */