#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
 * ARMv6 spin-locking.
 *
 * We (exclusively) read the old value. If it is zero the lock is
 * free, so we may have won the lock; we try (exclusively) storing
 * our locked value. If the exclusive store fails, another CPU got
 * there first and we retry from the load.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"		/* already held? */
"	strexeq	%0, %2, [%1]\n"		/* if free, try to store 1 */
"	teqeq	%0, #0\n"		/* did the exclusive store succeed? */
"	bne	1b"			/* held or lost the race: retry */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"		/* single attempt: no retry loop */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");

	return tmp == 0;		/* 0 means the store succeeded */
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	str	%1, [%0]"		/* we own the lock: plain store of 0 */
	:
	: "r" (&lock->lock), "r" (0)
	: "cc", "memory");
}
/*
 * RWLOCKS
 */
typedef struct {
	volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"		/* any readers or a writer? */
"	strexeq	%0, %2, [%1]\n"		/* if idle, try to set bit 31 */
"	teq	%0, #0\n"
"	bne	1b"			/* busy or store failed: retry */
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
"	str	%1, [%0]"		/* exclusive holder: plain store of 0 */
	:
	: "r" (&rw->lock), "r" (0)
	: "cc", "memory");
}
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* exclusively load the lock value */
"	adds	%0, %0, #1\n"		/* bump the reader count */
"	strexpl	%1, %0, [%2]\n"		/* store only if result is >= 0 */
"	rsbpls	%0, %1, #0\n"		/* store failure becomes negative */
"	bmi	1b"			/* negative: writer active or lost
					   the exclusive store, so retry */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* exclusively load the lock value */
"	sub	%0, %0, #1\n"		/* drop our reader count */
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"		/* raced with another CPU? */
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}
static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"		/* single attempt at setting bit 31 */
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");

	return tmp == 0;		/* 0 means we now hold the write lock */
}
#endif /* __ASM_SPINLOCK_H */