#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
+#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#ifdef __KERNEL__
-#include <linux/irqflags.h>
+#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
}
}
+/*
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+
+ *m |= 1UL << (nr & SZLONG_MASK);
+}
+
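+/*
+ * Illustrative note (not part of the original comment): with 32-bit
+ * longs, SZLONG_LOG is 5 and SZLONG_MASK is 31, so for nr = 40 the
+ * store above becomes
+ *
+ *	m = (unsigned long *) addr + (40 >> 5);    word index 1
+ *	*m |= 1UL << (40 & 31);                    bit 8 of addr[1]
+ */
+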
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
}
}
+/*
+ * __clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+
+ *m &= ~(1UL << (nr & SZLONG_MASK));
+}
+
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
}
}
+/*
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+
+ *m ^= 1UL << (nr & SZLONG_MASK);
+}
+
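+/*
+ * Usage sketch (illustrative; "map" and "map_lock" are hypothetical):
+ * the three non-atomic variants above are safe when every writer
+ * serializes on the same lock:
+ *
+ *	spin_lock(&map_lock);
+ *	__set_bit(0, map);
+ *	__change_bit(1, map);
+ *	__clear_bit(2, map);
+ *	spin_unlock(&map_lock);
+ */
+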
/*
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
}
}
+/*
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ int retval;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
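+/*
+ * Sketch (illustrative; "slots", "slots_lock" and "i" are
+ * hypothetical): claiming a slot in a lock-protected bitmap with the
+ * non-atomic test-and-set:
+ *
+ *	spin_lock(&slots_lock);
+ *	busy = __test_and_set_bit(i, slots);
+ *	spin_unlock(&slots_lock);
+ *	if (!busy)
+ *		... slot i is now ours ...
+ */
+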
/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
}
}
+/*
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ int retval;
+
+ a += (nr >> SZLONG_LOG);
+ mask = 1UL << (nr & SZLONG_MASK);
+ retval = ((mask & *a) != 0);
+ *a &= ~mask;
+
+ return retval;
+}
+
/*
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
}
}
+/*
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ int retval;
+
+ a += (nr >> SZLONG_LOG);
+ mask = 1UL << (nr & SZLONG_MASK);
+ retval = ((mask & *a) != 0);
+ *a ^= mask;
+
+ return retval;
+}
+
#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore
-#include <asm-generic/bitops/non-atomic.h>
+/*
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
+}
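+
+/*
+ * Example (illustrative): test_bit() is a plain read with no implied
+ * barrier; with addr[0] == 0x104UL,
+ *
+ *	test_bit(2, addr) == 1
+ *	test_bit(3, addr) == 0
+ *	test_bit(8, addr) == 1
+ */
+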
/*
* Return the bit position (0..63) of the most significant 1 bit in a word
return 63 - lz;
}
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
-
/*
* __ffs - find first bit in word.
* @word: The word to search
*/
static inline unsigned long __ffs(unsigned long word)
{
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
return __ilog2(word & -word);
+#else
+ int b = 0, s;
+
+#ifdef CONFIG_32BIT
+ s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
+ s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
+ s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
+ s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
+ s = 1; if (word << 31 != 0) s = 0; b += s;
+
+ return b;
+#endif
+#ifdef CONFIG_64BIT
+ s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
+ s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
+ s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
+ s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
+ s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
+ s = 1; if (word << 63 != 0) s = 0; b += s;
+
+ return b;
+#endif
+#endif
+}
+
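+/*
+ * Worked example (illustrative) for the 32-bit fallback above: with
+ * word = 0x90 only the s = 4 step fires (0x90 << 28 == 0), so b
+ * becomes 4, word becomes 0x9, the remaining tests keep s = 0, and
+ * __ffs(0x90) == 4, the index of the lowest set bit.
+ */
+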
+/*
+ * ffs - find first bit set.
+ * @word: The word to search
+ *
+ * Returns the position of the first set bit, 1..SZLONG,
+ * or 0 if no bit is set.
+ */
+
+static inline unsigned long ffs(unsigned long word)
+{
+ if (!word)
+ return 0;
+
+ return __ffs(word) + 1;
+}
+
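+/*
+ * Note (illustrative): ffs() is 1-based while __ffs() is 0-based, so
+ * __ffs(0x90) == 4 but ffs(0x90) == 5; ffs(0) == 0 whereas __ffs(0)
+ * is undefined.
+ */
+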
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ return __ffs(~word);
}
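+
+/*
+ * Example (illustrative): ffz(0x0000ffffUL) == 16, since complementing
+ * the word turns the first zero into the first one; check against ~0UL
+ * first, as the comment above warns.
+ */
+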
/*
* fls - find last bit set.
* @word: The word to search
*
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ * Returns the position of the last (most significant) set bit,
+ * 1..SZLONG, or 0 if no bit is set.
*/
-static inline int fls(int word)
+static inline unsigned long fls(unsigned long word)
{
- __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
+#ifdef CONFIG_32BIT
+#ifdef CONFIG_CPU_MIPS32
+ __asm__ (
+ " .set mips32 \n"
+ " clz %0, %1 \n"
+ " .set mips0 \n"
+ : "=r" (word)
+ : "r" (word));
return 32 - word;
+#else
+ {
+ int r = 32, s;
+
+ if (word == 0)
+ return 0;
+
+ s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
+ s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
+ s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
+ s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
+ s = 1; if ((word & 0x80000000)) s = 0; r -= s;
+
+ return r;
+ }
+#endif
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_CPU_MIPS64
+
+ __asm__ (
+ " .set mips64 \n"
+ " dclz %0, %1 \n"
+ " .set mips0 \n"
+ : "=r" (word)
+ : "r" (word));
+
+ return 64 - word;
+#else
+ {
+ int r = 64, s;
+
+ if (word == 0)
+ return 0;
+
+ s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
+ s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
+ s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
+ s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
+ s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
+ s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
+
+ return r;
+ }
+#endif
+#endif /* CONFIG_64BIT */
}
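+
+/*
+ * Example (illustrative): on a CPU with clz/dclz, fls() is simply
+ * "width minus count-leading-zeros", so on a 32-bit kernel
+ * fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32.
+ */
+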
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
-static inline int fls64(__u64 word)
+#define fls64(x) generic_fls64(x)
+
+/*
+ * find_next_zero_bit - find the next zero bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+static inline unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
+ const unsigned long *p = addr + (offset >> SZLONG_LOG);
+ unsigned long result = offset & ~SZLONG_MASK;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= SZLONG_MASK;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (_MIPS_SZLONG - offset);
+ if (size < _MIPS_SZLONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= _MIPS_SZLONG;
+ result += _MIPS_SZLONG;
+ }
+ while (size & ~SZLONG_MASK) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += _MIPS_SZLONG;
+ size -= _MIPS_SZLONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
- return 64 - word;
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
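+/*
+ * Usage sketch (illustrative; "map", "nbits" and "bit" are
+ * hypothetical): visiting every clear bit of a bitmap:
+ *
+ *	for (bit = find_first_zero_bit(map, nbits);
+ *	     bit < nbits;
+ *	     bit = find_next_zero_bit(map, nbits, bit + 1))
+ *		... bit is clear ...
+ */
+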
+/*
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+static inline unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + (offset >> SZLONG_LOG);
+ unsigned long result = offset & ~SZLONG_MASK;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= SZLONG_MASK;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= ~0UL << offset;
+ if (size < _MIPS_SZLONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= _MIPS_SZLONG;
+ result += _MIPS_SZLONG;
+ }
+ while (size & ~SZLONG_MASK) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += _MIPS_SZLONG;
+ size -= _MIPS_SZLONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= ~0UL >> (_MIPS_SZLONG - size);
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
}
-#else
-#include <asm-generic/bitops/fls64.h>
+
+/*
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
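+/*
+ * Usage sketch (illustrative), mirroring the zero-bit loop above:
+ *
+ *	for (bit = find_first_bit(map, nbits);
+ *	     bit < nbits;
+ *	     bit = find_next_bit(map, nbits, bit + 1))
+ *		... bit is set ...
+ */
+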
+#ifdef __KERNEL__
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#ifdef CONFIG_32BIT
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
#endif
+#ifdef CONFIG_64BIT
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#endif
+}
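+
+/*
+ * Example (illustrative): on a 32-bit kernel the 140-bit map spans
+ * five words b[0]..b[4]; if bit 100 is the only bit set, b[0]..b[2]
+ * are zero and the b[3] branch returns __ffs(b[3]) + 96 == 100.
+ */
+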
/*
- * ffs - find first bit set.
- * @word: The word to search
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
*
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * The Hamming weight of a number is the total number of bits set in it.
*/
-static inline int ffs(int word)
+
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
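+/*
+ * Example (illustrative): hweight32(0xf0f0f0f0) == 16 and
+ * hweight8(0x81) == 2; the generic_hweight* helpers count bits with
+ * shift-and-mask arithmetic, no population-count instruction needed.
+ */
+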
+static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
- if (!word)
- return 0;
+ unsigned char *ADDR = (unsigned char *) addr;
+ int mask, retval;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ retval = (mask & *ADDR) != 0;
+ *ADDR |= mask;
- return fls(word & -word);
+ return retval;
}
-#else
+static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
+{
+ unsigned char *ADDR = (unsigned char *) addr;
+ int mask, retval;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ retval = (mask & *ADDR) != 0;
+ *ADDR &= ~mask;
-#include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/fls64.h>
+ return retval;
+}
-#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
+static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
+{
+ const unsigned char *ADDR = (const unsigned char *) addr;
+ int mask;
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/find.h>
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
-#ifdef __KERNEL__
+ return ((mask & *ADDR) != 0);
+}
+
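+/*
+ * Note (illustrative): the *_le_bit helpers above index bits in
+ * little-endian order by addressing bytes, so bit nr lives in byte
+ * nr / 8 regardless of CPU endianness; e.g.
+ *
+ *	__test_and_set_le_bit(9, map);     sets bit 1 of byte 1
+ */
+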
+static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
+ unsigned long result = offset & ~SZLONG_MASK;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= SZLONG_MASK;
+ if (offset) {
+ tmp = cpu_to_lelongp(p++);
+ tmp |= ~0UL >> (_MIPS_SZLONG - offset); /* bug or feature ? */
+ if (size < _MIPS_SZLONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= _MIPS_SZLONG;
+ result += _MIPS_SZLONG;
+ }
+ while (size & ~SZLONG_MASK) {
+ if (~(tmp = cpu_to_lelongp(p++)))
+ goto found_middle;
+ result += _MIPS_SZLONG;
+ size -= _MIPS_SZLONG;
+ }
+ if (!size)
+ return result;
+ tmp = cpu_to_lelongp(p);
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+
+found_middle:
+ return result + ffz(tmp);
+}
+
+#define find_first_zero_le_bit(addr, size) \
+ find_next_zero_le_bit((addr), (size), 0)
+
+#define ext2_set_bit(nr, addr) \
+ __test_and_set_le_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr) \
+ __test_and_clear_le_bit((nr), (unsigned long *)(addr))
+#define ext2_set_bit_atomic(lock, nr, addr) \
+({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_set_bit((nr), (addr)); \
+ spin_unlock(lock); \
+ ret; \
+})
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_clear_bit((nr), (addr)); \
+ spin_unlock(lock); \
+ ret; \
+})
+#define ext2_test_bit(nr, addr) \
+ test_le_bit((nr), (unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+ find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+ find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
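+
+/*
+ * Usage sketch (illustrative; "lock", "nr" and "bitmap" are
+ * hypothetical): the atomic wrappers expect a caller-supplied
+ * spinlock:
+ *
+ *	if (!ext2_set_bit_atomic(lock, nr, bitmap))
+ *		... bit nr was clear and is now allocated ...
+ */
+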
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/ext2-non-atomic.h>
-#include <asm-generic/bitops/ext2-atomic.h>
-#include <asm-generic/bitops/minix.h>
+/*
+ * Bitmap functions for the minix filesystem.
+ *
+ * FIXME: These assume that Minix uses the native byte/bit order,
+ * which severely limits the Minix filesystem's value for data exchange.
+ */
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */