linux-2.6.6 / include/asm-ppc64/spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * An unsigned int is used because a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
typedef struct {
        volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_is_locked(x)       ((x)->lock != 0)

static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned int tmp;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1         # spin_trylock: load with reservation\n\
        cmpwi           0,%0,0          # already held?\n\
        li              %0,0            # preload the failure return value\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1         # store 1 if the reservation held\n\
        bne-            1b              # reservation lost, try again\n\
        isync                           # acquire barrier\n\
2:"     : "=&r"(tmp)
        : "r"(&lock->lock)
        : "cr0", "memory");

        return tmp;
}

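/*
 * A minimal, non-atomic C sketch of what the lwarx/stwcx. sequence
 * above implements (illustration only -- the real primitive needs the
 * asm so the test-and-set happens atomically; the name below is
 * hypothetical):
 */
#if 0
static int spin_trylock_sketch(spinlock_t *lock)
{
        if (lock->lock != 0)            /* cmpwi: already held */
                return 0;               /* li %0,0; bne- 2f */
        lock->lock = 1;                 /* stwcx., retried if the
                                           reservation was lost */
        return 1;                       /* isync keeps the critical
                                           section after the acquire */
}
#endif
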
static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
        unsigned int tmp;

        /* HMT_LOW drops SMT thread priority while we busy-wait on a
         * plain load; HMT_MEDIUM restores it before the lwarx retry. */
        __asm__ __volatile__(
        "b              2f              # spin_lock\n\
1:"
        HMT_LOW
"       lwzx            %0,0,%1         # loop on a normal load while held\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1         # looks free: take the reservation\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        stwcx.          %2,0,%1         # store the 1 operand\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&lock->lock), "r"(1)
        : "cr0", "memory");
}

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
        /* eieio orders the critical section's stores before the
         * releasing store below. */
        __asm__ __volatile__("eieio     # spin_unlock": : :"memory");
        lock->lock = 0;
}

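/*
 * Hedged usage sketch. Kernel code normally reaches these primitives
 * through the spin_lock()/spin_unlock() wrappers in <linux/spinlock.h>;
 * the function and variable names below are hypothetical:
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example(void)
{
        _raw_spin_lock(&example_lock);
        /* ... critical section ... */
        _raw_spin_unlock(&example_lock);
}
#endif
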
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * The lock word encodes the state: 0 is unlocked, a positive value
 * counts the active readers, and -1 means write-locked.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks (see the usage sketch below).
 */
typedef struct {
        volatile signed int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

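/*
 * A sketch of the irq-mixing pattern described in the comment above:
 * readers may run in interrupt context, so only writers disable
 * interrupts. Hypothetical caller code; local_irq_save()/restore()
 * come from the generic kernel headers:
 */
#if 0
static rwlock_t example_rwlock = RW_LOCK_UNLOCKED;

static void reader(void)                /* may run from an interrupt */
{
        _raw_read_lock(&example_rwlock);
        /* ... read the shared data ... */
        _raw_read_unlock(&example_rwlock);
}

static void writer(void)                /* process context only */
{
        unsigned long flags;

        local_irq_save(flags);          /* writers must be irq-safe */
        _raw_write_lock(&example_rwlock);
        /* ... modify the shared data ... */
        _raw_write_unlock(&example_rwlock);
        local_irq_restore(flags);
}
#endif
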
static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # read_trylock\n\
        li              %1,0            # preload the failure return value\n\
        extsw           %0,%0           # sign-extend the 32-bit count\n\
        addic.          %0,%0,1         # bump the reader count\n\
        ble-            2f              # result <= 0: write-locked\n\
        stwcx.          %0,0,%2\n\
        bne-            1b              # reservation lost, try again\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return ret;
}

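/*
 * Equivalent non-atomic C sketch of the read_trylock logic above; the
 * asm performs this read-modify-write atomically (hypothetical name):
 */
#if 0
static int read_trylock_sketch(rwlock_t *rw)
{
        int old = rw->lock;             /* lwarx + extsw */
        if (old + 1 <= 0)               /* addic./ble-: writer holds it */
                return 0;
        rw->lock = old + 1;             /* stwcx.: one more reader */
        return 1;
}
#endif
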
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # read_lock\n\
1:"
        HMT_LOW
"       lwax            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        blt+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            1b\n\
        stwcx.          %0,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "eieio                          # read_unlock: release barrier\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1        # atomically drop one reader\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # write_trylock\n\
        cmpwi           0,%0,0\n\
        li              %1,0\n\
        bne-            2f\n\
        stwcx.          %3,0,%2\n\
        bne-            1b\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");

        return ret;
}

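/*
 * Equivalent non-atomic C sketch of the write_trylock logic above; -1
 * marks the lock write-held (hypothetical name):
 */
#if 0
static int write_trylock_sketch(rwlock_t *rw)
{
        if (rw->lock != 0)              /* readers or a writer present */
                return 0;
        rw->lock = -1;                  /* stwcx. of the -1 operand */
        return 1;
}
#endif
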
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # write_lock\n\
1:"
        HMT_LOW
"       lwax            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        stwcx.          %2,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");
}

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__("eieio             # write_unlock": : :"memory");
        rw->lock = 0;
}

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->lock < 0;
}

#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_unlock_wait(x)    do { cpu_relax(); } while(spin_is_locked(x))

#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x)     ((x)->lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */