-/*
- * Now the non-atomic variants. We let the compiler handle all
- * optimisations for these. These are all _native_ endian.
- */
/*
 * Non-atomically set bit @nr in the bitmap at @p.
 * NOTE(review): the >>5 / &31 word math assumes a 32-bit unsigned long,
 * matching every other helper in this header — confirm for the target arch.
 */
static inline void __set_bit(int nr, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (nr & 31);

	p += nr >> 5;
	*p |= mask;
}
-
/*
 * Non-atomically clear bit @nr in the bitmap at @p.
 * NOTE(review): word index via >>5 / &31 — assumes 32-bit unsigned long,
 * per this header's convention.
 */
static inline void __clear_bit(int nr, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (nr & 31);

	p += nr >> 5;
	*p &= ~mask;
}
-
/*
 * Non-atomically toggle bit @nr in the bitmap at @p.
 * NOTE(review): word index via >>5 / &31 — assumes 32-bit unsigned long,
 * per this header's convention.
 */
static inline void __change_bit(int nr, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (nr & 31);

	p += nr >> 5;
	*p ^= mask;
}
-
/*
 * Non-atomically set bit @nr in the bitmap at @p and report its prior state.
 *
 * Returns zero if the bit was previously clear, non-zero (the bit's mask
 * value, as an int) if it was previously set.  Not safe against concurrent
 * updaters — callers must provide their own exclusion.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
{
	volatile unsigned long *word = p + (nr >> 5);
	unsigned long mask = 1UL << (nr & 31);
	unsigned long old = *word;

	*word = old | mask;
	return old & mask;
}
-
/*
 * Non-atomically clear bit @nr in the bitmap at @p and report its prior
 * state.
 *
 * Returns zero if the bit was previously clear, non-zero (the bit's mask
 * value, as an int) if it was previously set.  Not safe against concurrent
 * updaters — callers must provide their own exclusion.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
{
	volatile unsigned long *word = p + (nr >> 5);
	unsigned long mask = 1UL << (nr & 31);
	unsigned long old = *word;

	*word = old & ~mask;
	return old & mask;
}
-
/*
 * Non-atomically toggle bit @nr in the bitmap at @p and report its prior
 * state.
 *
 * Returns zero if the bit was previously clear, non-zero (the bit's mask
 * value, as an int) if it was previously set.  Not safe against concurrent
 * updaters — callers must provide their own exclusion.
 */
static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
{
	volatile unsigned long *word = p + (nr >> 5);
	unsigned long mask = 1UL << (nr & 31);
	unsigned long old = *word;

	*word = old ^ mask;
	return old & mask;
}
-
-/*
- * This routine doesn't need to be atomic.
- */
/*
 * Report whether bit @nr is set in the bitmap at @p.
 *
 * Pure read, so no atomicity is needed.  Returns 1 if the bit is set,
 * 0 otherwise.
 */
static inline int __test_bit(int nr, const volatile unsigned long *p)
{
	unsigned long word = p[nr >> 5];

	return (word >> (nr & 31)) & 1UL;
}