diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 4b04759ec..e24f7579a 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -14,10 +14,9 @@