ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / include / asm-sparc64 / spinlock.h
1 /* spinlock.h: 64-bit Sparc spinlock support.
2  *
3  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4  */
5
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
8
9 #include <linux/config.h>
10 #include <linux/threads.h>      /* For NR_CPUS */
11
12 #ifndef __ASSEMBLY__
13
14 /* To get debugging spinlocks which detect and catch
15  * deadlock situations, set CONFIG_DEBUG_SPINLOCK
16  * and rebuild your kernel.
17  */
18
19 /* All of these locking primitives are expected to work properly
20  * even in an RMO memory model, which currently is what the kernel
21  * runs in.
22  *
23  * There is another issue.  Because we play games to save cycles
24  * in the non-contention case, we need to be extra careful about
25  * branch targets into the "spinning" code.  They live in their
26  * own section, but the newer V9 branches have a shorter range
27  * than the traditional 32-bit sparc branch variants.  The rule
28  * is that the branches that go into and out of the spinner sections
29  * must be pre-V9 branches.
30  */
31
#ifndef CONFIG_DEBUG_SPINLOCK

/* Non-debug spinlock: a single byte.  0 means unlocked; any non-zero
 * value (0xff after an ldstub acquire) means held.
 */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED      0

/* Initialization is a plain store of zero.  The locked test reads the
 * byte through a volatile pointer so the compiler re-loads it rather
 * than caching a stale value.
 */
#define spin_lock_init(lock)    (*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)    (*((volatile unsigned char *)(lock)) != 0)
/* Spin (loads only, no stores) until the lock byte is observed zero.
 * The #LoadLoad membar on each iteration keeps the re-read of the lock
 * byte ordered with respect to prior loads.  The caller does NOT
 * acquire the lock; it merely waits for the current holder to drop it.
 *
 * Fix: the macro argument is now parenthesized at its use site, so an
 * expression argument (e.g. `p + i`) casts correctly.  This also makes
 * it consistent with the CONFIG_DEBUG_SPINLOCK variant below, which
 * already writes (__lock).
 */
#define spin_unlock_wait(lock)  \
do {    membar("#LoadLoad");    \
} while(*((volatile unsigned char *)(lock)))
43
/* Acquire the byte spinlock.
 *
 * Fast path: ldstub atomically loads the lock byte into %g7 and stores
 * 0xff into it.  If the old value was zero we now own the lock and fall
 * through; the membar (#StoreLoad | #StoreStore) orders the locking
 * store ahead of the critical section's memory accesses (needed under
 * the RMO model noted at the top of this file).
 *
 * Slow path (.subsection 2, kept out of line per the file-top comment
 * about spinner sections): spin with plain ldub loads until the byte
 * reads zero, with a #LoadLoad membar per iteration, then branch back
 * to 1: to retry the atomic ldstub.  The jump back out uses the "b,a"
 * form — presumably the pre-V9 branch required by the range rule
 * described above (TODO confirm against the V9 branch encodings).
 *
 * Clobbers %g7; the "memory" clobber stops the compiler from caching
 * memory values across the acquire.
 */
static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
        __asm__ __volatile__(
"1:     ldstub          [%0], %%g7\n"
"       brnz,pn         %%g7, 2f\n"
"        membar         #StoreLoad | #StoreStore\n"
"       .subsection     2\n"
"2:     ldub            [%0], %%g7\n"
"       brnz,pt         %%g7, 2b\n"
"        membar         #LoadLoad\n"
"       b,a,pt          %%xcc, 1b\n"
"       .previous\n"
        : /* no outputs */
        : "r" (lock)
        : "g7", "memory");
}
60
/* Single attempt to take the lock; never spins.
 *
 * ldstub atomically fetches the old lock byte into `result` while
 * writing 0xff to it.  The trailing membar (#StoreLoad | #StoreStore)
 * orders the locking store before later accesses, exactly as in the
 * _raw_spin_lock fast path above.
 *
 * Returns 1 if the lock was free and is now held by us (old byte was
 * zero), 0 if someone else already held it.
 */
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned int result;
        __asm__ __volatile__("ldstub [%1], %0\n\t"
                             "membar #StoreLoad | #StoreStore"
                             : "=r" (result)
                             : "r" (lock)
                             : "memory");
        return (result == 0);
}
71
/* Release the lock.
 *
 * The membar comes FIRST: #StoreStore | #LoadStore makes every store
 * and load of the critical section complete before the stb of %g0
 * (zero) that frees the lock — otherwise, under RMO, the unlocking
 * store could become visible before critical-section accesses.
 */
static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("membar    #StoreStore | #LoadStore\n\t"
                             "stb       %%g0, [%0]"
                             : /* No outputs */
                             : "r" (lock)
                             : "memory");
}
80
#else /* !(CONFIG_DEBUG_SPINLOCK) */

/* Debug spinlock: alongside the lock byte, record the program counter
 * and cpu of the current owner so deadlocks can be reported.  An
 * owner_cpu of 0xff marks "no owner" (see the init values below).
 */
typedef struct {
        unsigned char lock;
        unsigned int owner_pc, owner_cpu;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(__lock)  \
do {    (__lock)->lock = 0; \
        (__lock)->owner_pc = 0; \
        (__lock)->owner_cpu = 0xff; \
} while(0)
/* Same semantics as the non-debug variants, applied to the embedded
 * lock byte; volatile forces a real re-load on every test.
 */
#define spin_is_locked(__lock)  (*((volatile unsigned char *)(&((__lock)->lock))) != 0)
#define spin_unlock_wait(__lock)        \
do { \
        membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))

/* Out-of-line debug implementations (defined elsewhere in the arch
 * code — not visible in this header).  The `str` argument is the name
 * reported on a detected deadlock, presumably in a printk — confirm in
 * the definitions.
 */
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)   _spin_trylock(lp)
#define _raw_spin_lock(lock)    _do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)  _do_spin_unlock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */
108
109 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
110
#ifndef CONFIG_DEBUG_SPINLOCK

/* Non-debug rwlock: one 32-bit word, 0 = unlocked.  The encoding of
 * reader counts vs. the writer bit lives in the out-of-line helpers
 * below (defined elsewhere in the arch code, not visible here).
 */
typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED        0
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x) (*(x) != RW_LOCK_UNLOCKED)

/* Out-of-line lock/unlock primitives; this header only forwards to
 * them.  Note there is no __read_trylock here — only write has a
 * trylock in this version.
 */
extern void __read_lock(rwlock_t *);
extern void __read_unlock(rwlock_t *);
extern void __write_lock(rwlock_t *);
extern void __write_unlock(rwlock_t *);
extern int __write_trylock(rwlock_t *);

#define _raw_read_lock(p)       __read_lock(p)
#define _raw_read_unlock(p)     __read_unlock(p)
#define _raw_write_lock(p)      __write_lock(p)
#define _raw_write_unlock(p)    __write_unlock(p)
#define _raw_write_trylock(p)   __write_trylock(p)
129
#else /* !(CONFIG_DEBUG_SPINLOCK) */

/* Debug rwlock: tracks the writer's pc/cpu and a per-cpu reader pc
 * (hence the NR_CPUS array, which is why this header pulls in
 * linux/threads.h).  writer_cpu of 0xff means "no writer", matching
 * the spinlock convention above.
 */
typedef struct {
        unsigned long lock;
        unsigned int writer_pc, writer_cpu;
        unsigned int reader_pc[NR_CPUS];
} rwlock_t;
#define RW_LOCK_UNLOCKED        (rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x) ((x)->lock != 0)

/* Out-of-line debug implementations (defined elsewhere); `str` names
 * the operation for diagnostics.  Note write_unlock takes no string.
 */
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);

/* Each wrapper disables local interrupts around the helper call —
 * presumably so the debug bookkeeping and the lock transition appear
 * atomic with respect to interrupts on this cpu (confirm against the
 * helper definitions).
 */
#define _raw_read_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_lock(lock, "read_lock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_unlock(lock, "read_unlock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_lock(lock, "write_lock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_unlock(lock); \
        local_irq_restore(flags); \
} while(0)

#endif /* CONFIG_DEBUG_SPINLOCK */
175
176 #endif /* !(__ASSEMBLY__) */
177
178 #endif /* !(__SPARC64_SPINLOCK_H) */