#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <asm/system.h>

/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */
#ifdef CONFIG_SMP
/* Use an array of spinlocks for our atomic_ts.
** Hash function to index into a different SPINLOCK.
** Since "a" is usually an address, ">>8" makes one spinlock per 256-byte region.
*/
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])
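/* Worked example (illustration only, not part of the original header):
** with ATOMIC_HASH_SIZE of 4, an atomic_t at the hypothetical address
** 0x10abc534 selects its lock as (0x10abc534 >> 8) & 3 == 0x10abc5 & 3 == 1,
** i.e. &__atomic_hash[1], so counters in different 256-byte regions usually
** contend on different locks.
*/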

extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
/* copied from <asm/spinlock.h> and modified */
#  define SPIN_LOCK(x) \
	do { while(__ldcw(&(x)->lock) == 0); } while(0)

#  define SPIN_UNLOCK(x) \
	do { (x)->lock = 1; } while(0)

#else
#  define ATOMIC_HASH_SIZE 1
#  define ATOMIC_HASH(a)	(0)

/* copied from <linux/spinlock.h> and modified */
#  define SPIN_LOCK(x) (void)(x)

#  define SPIN_UNLOCK(x) do { } while(0)

#endif

/* copied from <linux/spinlock.h> and modified */
#define SPIN_LOCK_IRQSAVE(lock, flags)		do { local_irq_save(flags); SPIN_LOCK(lock); } while (0)
#define SPIN_UNLOCK_IRQRESTORE(lock, flags)	do { SPIN_UNLOCK(lock); local_irq_restore(flags); } while (0)

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile long counter; } atomic_t;

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
**	if (((unsigned long)p & 0xf) == 0)
**		return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
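
/* Usage sketch (illustration only, not part of the original header):
** xchg() atomically stores a new value and returns the previous one.
** The names "shared_flag" and "old" below are hypothetical:
**
**	static int shared_flag;
**	int old = xchg(&shared_flag, 1);
**
** Only 1-, 4- and (on __LP64__) 8-byte objects are supported; any other
** size fails to link via __xchg_called_with_bad_pointer().
*/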

#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		\
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
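
/* Usage sketch (illustration only, not part of the original header):
** cmpxchg() stores "n" only if the current value still equals "o", and
** returns the value it found either way.  A lock-free increment of the
** hypothetical counter "seq" retries until no other CPU got in between:
**
**	static int seq;
**	int cur, seen;
**	do {
**		cur = seq;
**		seen = cmpxchg(&seq, cur, cur + 1);
**	} while (seen != cur);
*/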


/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);

	v->counter = i;

	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define ATOMIC_INIT(i)	{ (i) }
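
/* Usage sketch (illustration only, not part of the original header): the
** resource-counting pattern the top-of-file comment refers to.  "refcnt"
** and release_foo() are hypothetical names.  atomic_inc() takes a
** reference; atomic_dec_and_test() drops one and reports when the last
** reference is gone:
**
**	static atomic_t refcnt = ATOMIC_INIT(1);
**
**	atomic_inc(&refcnt);
**	...
**	if (atomic_dec_and_test(&refcnt))
**		release_foo();
*/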

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PARISC_ATOMIC_H_ */