X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Flocal.h;fp=include%2Fasm-x86_64%2Flocal.h;h=3e72c41727c53af2f7ea75666aab6da56be79a5f;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=e769e620022530c751a244f1b83e301d683d1d2a;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index e769e6200..3e72c4172 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
 
 typedef struct
 {
-	volatile long counter;
+	volatile unsigned int counter;
 } local_t;
 
 #define LOCAL_INIT(i)	{ (i) }
@@ -13,34 +13,34 @@ typedef struct
 #define local_read(v)	((v)->counter)
 #define local_set(v,i)	(((v)->counter) = (i))
 
-static inline void local_inc(local_t *v)
+static __inline__ void local_inc(local_t *v)
 {
 	__asm__ __volatile__(
-		"incq %0"
+		"incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static inline void local_dec(local_t *v)
+static __inline__ void local_dec(local_t *v)
 {
 	__asm__ __volatile__(
-		"decq %0"
+		"decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static inline void local_add(long i, local_t *v)
+static __inline__ void local_add(unsigned int i, local_t *v)
 {
 	__asm__ __volatile__(
-		"addq %1,%0"
+		"addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
 
-static inline void local_sub(long i, local_t *v)
+static __inline__ void local_sub(unsigned int i, local_t *v)
 {
 	__asm__ __volatile__(
-		"subq %1,%0"
+		"subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -59,26 +59,12 @@ static inline void local_sub(long i, local_t *v)
  * This could be done better if we moved the per cpu data directly
  * after GS.
  */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(v)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (v);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(v)		\
-	({ preempt_disable();		\
-	   v;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
-#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
-#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
-#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
-#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
-#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
+#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
+#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
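
For reference, the net effect of this diff is to turn local_t on x86-64 back into a 32-bit volatile unsigned int manipulated with incl/decl/addl/subl rather than the 64-bit incq/decq/addq/subq forms, and to make the cpu_local_* helpers call the local_* operations directly instead of going through the removed preempt_disable()/preempt_enable() wrappers. The following stand-alone sketch is only an illustration of the 32-bit inline-assembly pattern the patched header uses, compiled in ordinary userspace with GCC on x86 or x86-64; the main() driver and the printf() call are demo additions and are not part of the kernel header.

/* Userspace demo (assumption: GCC, x86 or x86-64 target) of the
 * local_t pattern after this patch: a volatile unsigned int counter
 * updated with single 32-bit read-modify-write instructions. */
#include <stdio.h>

typedef struct {
	volatile unsigned int counter;
} local_t;

#define LOCAL_INIT(i)	{ (i) }
#define local_read(v)	((v)->counter)

static __inline__ void local_inc(local_t *v)
{
	/* "=m" declares the counter as a memory output; the matching "m"
	 * input tells GCC the old value is also read, so the update stays
	 * a single incl on the memory operand. */
	__asm__ __volatile__(
		"incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

static __inline__ void local_add(unsigned int i, local_t *v)
{
	__asm__ __volatile__(
		"addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

int main(void)
{
	local_t c = LOCAL_INIT(0);

	local_inc(&c);		/* counter becomes 1 */
	local_add(41, &c);	/* counter becomes 42 */
	printf("local_read(&c) = %u\n", local_read(&c));
	return 0;
}

Built with, say, gcc -O2 demo.c, this prints local_read(&c) = 42. Since these updates use no lock prefix, a single instruction is atomic only with respect to the CPU executing it; that is why the removed comment in the hunk above warned that, without disabling preemption, a task migrated to another CPU could touch the previous CPU's counter non-atomically.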