#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <asm/system.h>
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */
#ifdef CONFIG_SMP
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

typedef spinlock_t atomic_lock_t;

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern atomic_lock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
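
/* Illustrative sketch (not part of the original header): ATOMIC_HASH picks
 * a lock based on the cacheline an atomic_t lives in, so two counters in
 * the same cacheline share one lock while unrelated counters usually do
 * not.  The addresses below are purely hypothetical:
 *
 *	atomic_t *a = (atomic_t *) 0x10400;
 *	atomic_t *b = (atomic_t *) 0x10404;	// same cacheline as 'a'
 *	// ATOMIC_HASH(a) == ATOMIC_HASH(b)
 */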
static inline void atomic_spin_lock(atomic_lock_t *a)
{
	while (__ldcw(a) == 0)
		while (a->lock[0] == 0);
}

static inline void atomic_spin_unlock(atomic_lock_t *a)
{
	a->lock[0] = 1;
}
#else
# define ATOMIC_HASH_SIZE 1
# define ATOMIC_HASH(a) (0)

# define atomic_spin_lock(x) (void)(x)
# define atomic_spin_unlock(x) do { } while(0)
#endif
/* copied from <linux/spinlock.h> and modified */
#define atomic_spin_lock_irqsave(lock, flags)	do {	\
	local_irq_save(flags);				\
	atomic_spin_lock(lock);				\
} while (0)
#define atomic_spin_unlock_irqrestore(lock, flags) do {	\
	atomic_spin_unlock(lock);				\
	local_irq_restore(flags);				\
} while (0)
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile long counter; } atomic_t;
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
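
/* Usage sketch (illustrative only, not part of the original header):
 * xchg() atomically stores a new value and returns the old one, e.g.
 * fetching and clearing a pending flag.  'pending' and do_work() are
 * hypothetical:
 *
 *	static int pending;
 *
 *	if (xchg(&pending, 0))
 *		do_work();
 */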
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						\
  ({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })
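
/* Usage sketch (illustrative only): the usual compare-and-swap retry
 * loop built on cmpxchg().  'val' is a hypothetical variable:
 *
 *	static unsigned long val;
 *	unsigned long old, new_;
 *
 *	do {
 *		old = val;
 *		new_ = old | 0x1;
 *	} while (cmpxchg(&val, old, new_) != old);
 */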
/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);

	return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);

	v->counter = i;

	atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}
/* exported interface */

#define atomic_add(i,v)		((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)		((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)		((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)		((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
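
/* Usage sketch (illustrative only): the common reference-count pattern,
 * freeing the object once the last reference is dropped.  'struct foo'
 * and foo_destroy() are hypothetical:
 *
 *	struct foo { atomic_t refcnt; };
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			foo_destroy(f);
 *	}
 */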
#define ATOMIC_INIT(i)	{ (i) }
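
/* Sketch (illustrative only): static initialization vs. runtime
 * initialization of a hypothetical counter:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);	// equivalent at runtime
 *	atomic_inc(&nr_users);
 */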
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PARISC_ATOMIC_H_ */