X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fpercpu_counter.h;h=682525511c9e90d352f2d46730932e9711ce457e;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=f5aa593ccf32fe358da38271e4a25d0653cfd8a0;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index f5aa593cc..682525511 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -6,18 +6,18 @@
  * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
  */
 
+#include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
-#include <linux/types.h>
 
 #ifdef CONFIG_SMP
 
 struct percpu_counter {
 	spinlock_t lock;
-	s64 count;
-	s32 *counters;
+	long count;
+	long *counters;
 };
 
 #if NR_CPUS >= 16
@@ -26,11 +26,11 @@ struct percpu_counter {
 #define FBC_BATCH	(NR_CPUS*4)
 #endif
 
-static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline void percpu_counter_init(struct percpu_counter *fbc)
 {
 	spin_lock_init(&fbc->lock);
-	fbc->count = amount;
-	fbc->counters = alloc_percpu(s32);
+	fbc->count = 0;
+	fbc->counters = alloc_percpu(long);
 }
 
 static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -38,10 +38,10 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
 	free_percpu(fbc->counters);
 }
 
-void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
-s64 percpu_counter_sum(struct percpu_counter *fbc);
+void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+long percpu_counter_sum(struct percpu_counter *fbc);
 
-static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+static inline long percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
 }
@@ -49,14 +49,13 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 /*
  * It is possible for the percpu_counter_read() to return a small negative
  * number for some counter which should never be negative.
- *
  */
-static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
 {
-	s64 ret = fbc->count;
+	long ret = fbc->count;
 
 	barrier();	/* Prevent reloads of fbc->count */
-	if (ret >= 0)
+	if (ret > 0)
 		return ret;
 	return 1;
 }
@@ -64,12 +63,12 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
 #else
 
 struct percpu_counter {
-	s64 count;
+	long count;
 };
 
-static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline void percpu_counter_init(struct percpu_counter *fbc)
 {
-	fbc->count = amount;
+	fbc->count = 0;
 }
 
 static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -77,24 +76,24 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 
 static inline void
-percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+percpu_counter_mod(struct percpu_counter *fbc, long amount)
 {
 	preempt_disable();
 	fbc->count += amount;
 	preempt_enable();
 }
 
-static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+static inline long percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
 }
 
-static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
 {
 	return fbc->count;
 }
 
-static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+static inline long percpu_counter_sum(struct percpu_counter *fbc)
 {
 	return percpu_counter_read_positive(fbc);
 }
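
For context, a minimal usage sketch of the percpu_counter API as it looks on the "+" side of this diff (long-based counters, init with no initial-value argument). The identifiers example_events and example_counter_usage are hypothetical, and the comments about batching assume the usual lib/percpu_counter.c behaviour; this is an illustration, not part of the patch.

#include <linux/kernel.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_events;	/* hypothetical counter */

static void example_counter_usage(void)
{
	long approx, exact;

	/* On this side of the diff the counter always starts at 0. */
	percpu_counter_init(&example_events);

	/* Cheap updates: per-CPU deltas are folded into fbc->count in FBC_BATCH units. */
	percpu_counter_mod(&example_events, 1);
	percpu_counter_mod(&example_events, -5);

	approx = percpu_counter_read(&example_events);	/* fast, may drift by roughly NR_CPUS*FBC_BATCH */
	exact = percpu_counter_sum(&example_events);	/* slower, folds in every per-CPU slot */

	printk(KERN_INFO "approx=%ld exact=%ld\n", approx, exact);

	percpu_counter_destroy(&example_events);
}

percpu_counter_read_positive() is the variant for callers that must never see the small negative drift percpu_counter_read() can report (as noted in the header comment), which is why it clamps its result on the "+" side of this diff.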