X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m32r%2Fbitops.h;h=e78443981349df080c2812d82927429f10aa2d85;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=250057554dde43a3cc3869d5a1d94ab462c73446;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index 250057554..e78443981 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -13,6 +13,7 @@
 
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/assembler.h>
 #include <asm/system.h>
 #include <asm/byteorder.h>
 #include <asm/types.h>
@@ -25,18 +26,6 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD "lock"
-#define STORE "unlock"
-#else
-#define LOAD "ld"
-#define STORE "st"
-#endif
-
-/* #define ADDR (*(volatile long *) addr) */
-
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -47,7 +36,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -60,9 +49,9 @@ static inline void set_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1;		\n\t"
+		M32R_LOCK" %0, @%1;	\n\t"
 		"or	%0, %2;		\n\t"
-		STORE" %0, @%1;		\n\t"
+		M32R_UNLOCK" %0, @%1;	\n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -82,7 +71,7 @@ static inline void set_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -102,7 +91,7 @@
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -116,9 +105,9 @@ static inline void clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1;		\n\t"
+		M32R_LOCK" %0, @%1;	\n\t"
 		"and	%0, %2;		\n\t"
-		STORE" %0, @%1;		\n\t"
+		M32R_UNLOCK" %0, @%1;	\n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (~mask)
 		: "memory"
@@ -129,7 +118,7 @@ static inline void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long * addr)
+static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask;
 	volatile unsigned long *a = addr;
@@ -151,7 +140,7 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -170,7 +159,7 @@ static inline void __change_bit(int nr, volatile void * addr)
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
-static inline void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -183,9 +172,9 @@ static inline void change_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1;		\n\t"
+		M32R_LOCK" %0, @%1;	\n\t"
 		"xor	%0, %2;		\n\t"
-		STORE" %0, @%1;		\n\t"
+		M32R_UNLOCK" %0, @%1;	\n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -204,7 +193,7 @@ static inline void change_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -217,11 +206,11 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%2")
-		LOAD" %0, @%2;		\n\t"
+		M32R_LOCK" %0, @%2;	\n\t"
 		"mv	%1, %0;		\n\t"
 		"and	%0, %3;		\n\t"
 		"or	%1, %3;		\n\t"
-		STORE" %1, @%2;		\n\t"
+		M32R_UNLOCK" %1, @%2;	\n\t"
 		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -240,7 +229,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -261,7 +250,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -275,12 +264,12 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%3")
-		LOAD" %0, @%3;		\n\t"
-		"mv	%1, %0;		\n\t"
-		"and	%0, %2;		\n\t"
-		"not	%2, %2;		\n\t"
-		"and	%1, %2;		\n\t"
-		STORE" %1, @%3;		\n\t"
+		M32R_LOCK" %0, @%3;	\n\t"
+		"mv	%1, %0;		\n\t"
+		"and	%0, %2;		\n\t"
+		"not	%2, %2;		\n\t"
+		"and	%1, %2;		\n\t"
+		M32R_UNLOCK" %1, @%3;	\n\t"
 		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
 		: "r" (a)
 		: "memory"
@@ -299,7 +288,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -313,7 +302,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -334,7 +323,7 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -347,11 +336,11 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%2")
-		LOAD" %0, @%2;		\n\t"
+		M32R_LOCK" %0, @%2;	\n\t"
 		"mv	%1, %0;		\n\t"
 		"and	%0, %3;		\n\t"
 		"xor	%1, %3;		\n\t"
-		STORE" %1, @%2;		\n\t"
+		M32R_UNLOCK" %1, @%2;	\n\t"
 		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -361,16 +350,12 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 
 	return (oldbit != 0);
 }
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
 /**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
-static int test_bit(int nr, const volatile void * addr);
-#endif
-
-static inline int test_bit(int nr, const volatile void * addr)
+static __inline__ int test_bit(int nr, const volatile void * addr)
 {
 	__u32 mask;
 	const volatile __u32 *a = addr;
@@ -387,7 +372,7 @@ static inline int test_bit(int nr, const volatile void * addr)
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
-static inline unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
 	int k;
 
@@ -420,9 +405,10 @@ static inline unsigned long ffz(unsigned long word)
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-static inline int find_next_zero_bit(void *addr, int size, int offset)
+static __inline__ int find_next_zero_bit(const unsigned long *addr,
+					 int size, int offset)
 {
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	const unsigned long *p = addr + (offset >> 5);
 	unsigned long result = offset & ~31UL;
 	unsigned long tmp;
 
@@ -462,7 +448,7 @@
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs(unsigned long word)
+static __inline__ unsigned long __ffs(unsigned long word)
 {
 	int k = 0;
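
For reference, a minimal usage sketch of the interfaces this patch touches; it is not part of the patch. The flag index MY_BUSY, the word my_flags, and both helper functions are hypothetical, but test_and_set_bit(), clear_bit(), and smp_mb__before_clear_bit() are exactly the calls documented in the kernel-doc above.

#include <asm/bitops.h>

#define MY_BUSY	0			/* hypothetical bit number within my_flags */

static unsigned long my_flags;		/* word holding the busy bit */

/* Atomically claim a resource: test_and_set_bit() returns nonzero if the
 * bit was already set, i.e. another CPU or context got there first.  Per
 * the kernel-doc above, the operation is atomic and implies a barrier. */
static int my_try_claim(void)
{
	if (test_and_set_bit(MY_BUSY, &my_flags))
		return -1;	/* already claimed */
	return 0;		/* we now own it */
}

/* Release: clear_bit() is atomic but does not imply a barrier on its own,
 * so (as its kernel-doc says) order prior stores explicitly when needed. */
static void my_release(void)
{
	smp_mb__before_clear_bit();
	clear_bit(MY_BUSY, &my_flags);
}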