#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' (cs/csg) for atomicity in SMP environments.
 */
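
/*
 * Illustration only (a sketch, not part of the original header): the
 * compare-and-swap retry pattern that the __CS_LOOP/__CSG_LOOP macros
 * below encode in assembly, written in plain C using the helpers
 * defined further down in this file.  "example_counter" is a
 * hypothetical atomic_t.
 *
 *	int old_val, new_val;
 *
 *	do {
 *		old_val = atomic_read(&example_counter);
 *		new_val = old_val + 1;
 *	} while (atomic_compare_and_swap(old_val, new_val, &example_counter) != 0);
 *
 * If another CPU changed the counter between the read and the swap, the
 * swap fails (returns 1) and the loop retries with the fresh value.
 */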

typedef struct {
	volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * Load the old value, apply op_string (ar/sr/nr/or) to compute the new
 * value and try to store it back with "cs"; retry until no other CPU
 * has modified the counter in between.  The expression evaluates to the
 * new value of the counter.
 */
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   l     %0,0(%3)\n"			\
			     "0: lr    %1,%0\n"				\
			     op_string "  %1,%4\n"			\
			     "   cs    %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t * v)
{
	__CS_LOOP(v, i, "ar");
}
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
static __inline__ int atomic_add_negative(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar") < 0;
}
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	__CS_LOOP(v, i, "sr");
}
static __inline__ void atomic_inc(volatile atomic_t * v)
{
	__CS_LOOP(v, 1, "ar");
}
static __inline__ int atomic_inc_return(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "ar");
}
/* true if the incremented counter is now zero */
static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "ar") == 0;
}
static __inline__ void atomic_dec(volatile atomic_t * v)
{
	__CS_LOOP(v, 1, "sr");
}
static __inline__ int atomic_dec_return(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "sr");
}
/* true if the decremented counter is now zero */
static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "sr") == 0;
}
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}
#undef __CS_LOOP
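
/*
 * Typical use, sketched for illustration only (the names are hypothetical,
 * not part of this header): an SMP-safe reference count built from the
 * 32-bit operations above, as hinted at by "resource counting" in the
 * comment at the top of this file.
 *
 *	static atomic_t example_refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&example_refcount);			take a reference
 *	...
 *	if (atomic_dec_and_test(&example_refcount))
 *		example_release_object();		last reference gone
 */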

#ifdef __s390x__
typedef struct {
	volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)  { (i) }

/*
 * 64-bit variant of __CS_LOOP, using "csg" instead of "cs".
 */
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   lg    %0,0(%3)\n"			\
			     "0: lgr   %1,%0\n"				\
			     op_string "  %1,%4\n"			\
			     "   csg   %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic64_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic64_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
#define atomic64_read(v)          ((v)->counter)
#define atomic64_set(v,i)         (((v)->counter) = (i))

static __inline__ void atomic64_add(long long i, atomic64_t * v)
{
	__CSG_LOOP(v, i, "agr");
}
static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
static __inline__ int atomic64_add_negative(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr") < 0;
}
static __inline__ void atomic64_sub(long long i, atomic64_t * v)
{
	__CSG_LOOP(v, i, "sgr");
}
static __inline__ void atomic64_inc(volatile atomic64_t * v)
{
	__CSG_LOOP(v, 1, "agr");
}
static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "agr");
}
/* true if the incremented counter is now zero */
static __inline__ int atomic64_inc_and_test(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "agr") == 0;
}
static __inline__ void atomic64_dec(volatile atomic64_t * v)
{
	__CSG_LOOP(v, 1, "sgr");
}
static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "sgr");
}
/* true if the decremented counter is now zero */
static __inline__ int atomic64_dec_and_test(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "sgr") == 0;
}
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#undef __CSG_LOOP
#endif /* __s390x__ */

/*
 * atomic_compare_and_swap: store new_val in *v only if *v still equals
 * expected_oldval.
 *
 *  returns 0 if expected_oldval == value in *v (swap was successful)
 *  returns 1 if unsuccessful.
 *
 *  This is non-portable, use bitops or spinlocks instead!
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
	int retval;

	__asm__ __volatile__(
		"  lr   %0,%3\n"
		"  cs   %0,%4,0(%2)\n"
		"  ipm  %0\n"
		"  srl  %0,28\n"
		"0:"
		: "=&d" (retval), "=m" (v->counter)
		: "a" (v), "d" (expected_oldval), "d" (new_val),
		  "m" (v->counter) : "cc", "memory" );
	return retval;
}
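
/*
 * Illustration only (hypothetical names, not part of this header): with the
 * return convention above, a caller can claim a resource exactly once:
 *
 *	static atomic_t example_slot_busy = ATOMIC_INIT(0);
 *
 *	if (atomic_compare_and_swap(0, 1, &example_slot_busy) == 0)
 *		...this CPU atomically changed 0 -> 1 and owns the slot...
 *	else
 *		...another CPU got there first...
 */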

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */