X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm26%2Fbitops.h;fp=include%2Fasm-arm26%2Fbitops.h;h=d87f8634e62525d1a67bf33014f5a299ab340eb5;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=19a69573a6549955b94e3f02a3488da5bd336c20;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h
index 19a69573a..d87f8634e 100644
--- a/include/asm-arm26/bitops.h
+++ b/include/asm-arm26/bitops.h
@@ -117,7 +117,65 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
         return res & mask;
 }
 
-#include <asm-generic/bitops/non-atomic.h>
+/*
+ * Now the non-atomic variants.  We let the compiler handle all
+ * optimisations for these.  These are all _native_ endian.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *p)
+{
+        p[nr >> 5] |= (1UL << (nr & 31));
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *p)
+{
+        p[nr >> 5] &= ~(1UL << (nr & 31));
+}
+
+static inline void __change_bit(int nr, volatile unsigned long *p)
+{
+        p[nr >> 5] ^= (1UL << (nr & 31));
+}
+
+static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
+{
+        unsigned long oldval, mask = 1UL << (nr & 31);
+
+        p += nr >> 5;
+
+        oldval = *p;
+        *p = oldval | mask;
+        return oldval & mask;
+}
+
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
+{
+        unsigned long oldval, mask = 1UL << (nr & 31);
+
+        p += nr >> 5;
+
+        oldval = *p;
+        *p = oldval & ~mask;
+        return oldval & mask;
+}
+
+static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
+{
+        unsigned long oldval, mask = 1UL << (nr & 31);
+
+        p += nr >> 5;
+
+        oldval = *p;
+        *p = oldval ^ mask;
+        return oldval & mask;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int __test_bit(int nr, const volatile unsigned long * p)
+{
+        return (p[nr >> 5] >> (nr & 31)) & 1UL;
+}
 
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
@@ -153,6 +211,7 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
 #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
 #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
+#define test_bit(nr,p) __test_bit(nr,p)
 #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -160,13 +219,80 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 
 #define WORD_BITOFF_TO_LE(x) ((x))
 
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+        int k;
+
+        word = ~word;
+        k = 31;
+        if (word & 0x0000ffff) { k -= 16; word <<= 16; }
+        if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
+        if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
+        if (word & 0x30000000) { k -= 2;  word <<= 2;  }
+        if (word & 0x40000000) { k -= 1; }
+        return k;
+}
+
+/*
+ * __ffs = Find First Set bit in word. Undefined if no bit is set,
+ * so code should check against 0 first..
+ */ +static inline unsigned long __ffs(unsigned long word) +{ + int k; + + k = 31; + if (word & 0x0000ffff) { k -= 16; word <<= 16; } + if (word & 0x00ff0000) { k -= 8; word <<= 8; } + if (word & 0x0f000000) { k -= 4; word <<= 4; } + if (word & 0x30000000) { k -= 2; word <<= 2; } + if (word & 0x40000000) { k -= 1; } + return k; +} + +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) +#define fls64(x) generic_fls64(x) + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ + +#define ffs(x) generic_ffs(x) + +/* + * Find first bit set in a 168-bit bitmap, where the first + * 128 bits are unlikely to be set. + */ +static inline int sched_find_first_bit(unsigned long *b) +{ + unsigned long v; + unsigned int off; + + for (off = 0; v = b[off], off < 4; off++) { + if (unlikely(v)) + break; + } + return __ffs(v) + off * 32; +} + +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of a N-bit word + */ + +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) /* * Ext2 is defined to use little-endian byte ordering. @@ -181,7 +307,7 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); #define ext2_clear_bit_atomic(lock,nr,p) \ test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_test_bit(nr,p) \ - test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_find_first_zero_bit(p,sz) \ _find_first_zero_bit_le(p,sz) #define ext2_find_next_zero_bit(p,sz,off) \ @@ -194,7 +320,7 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); #define minix_set_bit(nr,p) \ __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_bit(nr,p) \ - test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_set_bit(nr,p) \ __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_clear_bit(nr,p) \