X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Flocal.h;h=e769e620022530c751a244f1b83e301d683d1d2a;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3e72c41727c53af2f7ea75666aab6da56be79a5f;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index 3e72c4172..e769e6200 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
 
 typedef struct
 {
-	volatile unsigned int counter;
+	volatile long counter;
 } local_t;
 
 #define LOCAL_INIT(i)	{ (i) }
@@ -13,34 +13,34 @@ typedef struct
 #define local_read(v)	((v)->counter)
 #define local_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ void local_inc(local_t *v)
+static inline void local_inc(local_t *v)
 {
 	__asm__ __volatile__(
-		"incl %0"
+		"incq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static __inline__ void local_dec(local_t *v)
+static inline void local_dec(local_t *v)
 {
 	__asm__ __volatile__(
-		"decl %0"
+		"decq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static __inline__ void local_add(unsigned int i, local_t *v)
+static inline void local_add(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"addl %1,%0"
+		"addq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
 
-static __inline__ void local_sub(unsigned int i, local_t *v)
+static inline void local_sub(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"subl %1,%0"
+		"subq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -59,12 +59,26 @@ static __inline__ void local_sub(unsigned int i, local_t *v)
  * This could be done better if we moved the per cpu data directly
  * after GS.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)		\
+	({ local_t res__;		\
+	   preempt_disable();		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
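
A minimal usage sketch of the API this patch touches (not part of the patch itself; the counter name evt_counter and the two helpers below are hypothetical). It shows the standard DEFINE_PER_CPU + local_t pattern: the fast path bumps the current CPU's counter through cpu_local_inc(), which after this change uses a 64-bit incq and disables preemption around the access, while a slow reader sums the per-CPU copies with local_read().

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-CPU event counter; with this patch it is a full
 * 64-bit long on x86-64 instead of an unsigned int. */
static DEFINE_PER_CPU(local_t, evt_counter) = LOCAL_INIT(0);

/* Fast path: increment this CPU's copy.  cpu_local_inc() now wraps the
 * access in preempt_disable()/preempt_enable(), so a task migrated to
 * another CPU can no longer touch the old CPU's counter non-atomically. */
static void evt_hit(void)
{
	cpu_local_inc(evt_counter);
}

/* Slow path: approximate global total, summed across all CPUs. */
static long evt_total(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(evt_counter, cpu));
	return sum;
}

Callers that already run with preemption or interrupts disabled can still use local_inc(&__get_cpu_var(...)) directly and skip the extra preempt_disable()/preempt_enable() pair that the cpu_local_wrap macros add.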