#ifndef _H8300_SEMAPHORE_H
#define _H8300_SEMAPHORE_H

#define RW_LOCK_BIAS             0x01000000

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#include <asm/system.h>
#include <asm/atomic.h>

/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * H8/300 version by Yoshinori Sato
 */


struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
        long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
                , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
        __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

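/*
 * Illustrative sketch of how these declarations are typically used
 * (not part of the original header; the lock and function names
 * below are hypothetical):
 *
 *      static DECLARE_MUTEX(example_sem);
 *
 *      static void example_critical_section(void)
 *      {
 *              down(&example_sem);
 *              ... access data protected by example_sem ...
 *              up(&example_sem);
 *      }
 */
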
static inline void sema_init (struct semaphore *sem, int val)
{
        *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
asmlinkage int  __down_failed_trylock(void  /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
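/*
 * Roughly, the inline asm below amounts to the following C sketch
 * (illustration only; the real code masks interrupts around the
 * decrement, which is why it is written in asm):
 *
 *      if (atomic_dec_return(&sem->count) < 0)
 *              __down(sem);
 */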
static inline void down(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
        might_sleep();

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %0, er1\n\t"
                "dec.l #1,er1\n\t"
                "mov.l er1,%0\n\t"
                "bpl 1f\n\t"
                "ldc r3l,ccr\n\t"
                "mov.l %1,er0\n\t"
                "jsr @___down\n\t"
                "bra 2f\n"
                "1:\n\t"
                "ldc r3l,ccr\n"
                "2:"
                : "+m"(*count)
                : "g"(sem)
                : "cc",  "er1", "er2", "er3");
}

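/*
 * down_interruptible() works like down() but the sleep can be broken
 * by a signal: it returns 0 once the semaphore has been acquired and
 * a negative error code if it was interrupted.  Illustrative use
 * (hypothetical lock name):
 *
 *      if (down_interruptible(&example_sem))
 *              return -ERESTARTSYS;
 */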
static inline int down_interruptible(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
        might_sleep();

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r1l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1, er2\n\t"
                "dec.l #1,er2\n\t"
                "mov.l er2,%1\n\t"
                "bpl 1f\n\t"
                "ldc r1l,ccr\n\t"
                "mov.l %2,er0\n\t"
                "jsr @___down_interruptible\n\t"
                "bra 2f\n"
                "1:\n\t"
                "ldc r1l,ccr\n\t"
                "sub.l %0,%0\n\t"
                "2:\n\t"
                : "=r" (count),"+m" (*count)
                : "g"(sem)
                : "cc", "er1", "er2", "er3");
        return (int)count;
}

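/*
 * down_trylock() never sleeps: it returns 0 if the semaphore was
 * acquired and non-zero if it was already held.  Illustrative use
 * (hypothetical lock name):
 *
 *      if (down_trylock(&example_sem))
 *              return -EBUSY;
 */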
static inline int down_trylock(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %0,er2\n\t"
                "dec.l #1,er2\n\t"
                "mov.l er2,%0\n\t"
                "bpl 1f\n\t"
                "ldc r3l,ccr\n\t"
                "jmp @3f\n\t"
                LOCK_SECTION_START(".align 2\n\t")
                "3:\n\t"
                "mov.l %2,er0\n\t"
                "jsr @___down_trylock\n\t"
                "jmp @2f\n\t"
                LOCK_SECTION_END
                "1:\n\t"
                "ldc r3l,ccr\n\t"
                "sub.l %1,%1\n"
                "2:"
                : "+m" (*count),"=r"(count)
                : "g"(sem)
                : "cc", "er1","er2", "er3");
        return (int)count;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
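/*
 * As a C sketch, the asm below is roughly (illustration only; the
 * increment is done with interrupts masked):
 *
 *      if (atomic_inc_return(&sem->count) <= 0)
 *              __up(sem);
 */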
static inline void up(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %0,er1\n\t"
                "inc.l #1,er1\n\t"
                "mov.l er1,%0\n\t"
                "ldc r3l,ccr\n\t"
                "sub.l er2,er2\n\t"
                "cmp.l er2,er1\n\t"
                "bgt 1f\n\t"
                "mov.l %1,er0\n\t"
                "jsr @___up\n"
                "1:"
                : "+m"(*count)
                : "g"(sem)
                : "cc", "er1", "er2", "er3");
}

#endif /* __ASSEMBLY__ */

#endif