#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD "lock"
-#define STORE "unlock"
-#else
-#define LOAD "ld"
-#define STORE "st"
-#endif
-
-/* #define ADDR (*(volatile long *) addr) */
-
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile void * addr)
{
__u32 mask;
volatile __u32 *a = addr;
local_irq_save(flags);
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "r6", "%1")
- LOAD" %0, @%1; \n\t"
+ M32R_LOCK" %0, @%1; \n\t"
"or %0, %2; \n\t"
- STORE" %0, @%1; \n\t"
+ M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (tmp)
: "r" (a), "r" (mask)
: "memory"
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void * addr)
{
__u32 mask;
volatile __u32 *a = addr;
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile void * addr)
{
__u32 mask;
volatile __u32 *a = addr;
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "r6", "%1")
- LOAD" %0, @%1; \n\t"
+ M32R_LOCK" %0, @%1; \n\t"
"and %0, %2; \n\t"
- STORE" %0, @%1; \n\t"
+ M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (tmp)
: "r" (a), "r" (~mask)
: "memory"
local_irq_restore(flags);
}
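
/*
 * Illustrative sketch only (not part of this header): a typical caller
 * keeps a word of flag bits and flips single bits atomically with
 * set_bit()/clear_bit().  The names dev_flags and DEV_BUSY_BIT below
 * are invented for the example.
 *
 *	static unsigned long dev_flags;
 *	#define DEV_BUSY_BIT	0
 *
 *	set_bit(DEV_BUSY_BIT, &dev_flags);	   atomic, no lock required
 *	...
 *	smp_mb__before_clear_bit();		   barrier, only needed when the
 *	clear_bit(DEV_BUSY_BIT, &dev_flags);	   bit is used for locking
 */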
-static inline void __clear_bit(int nr, volatile unsigned long * addr)
+static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask;
volatile unsigned long *a = addr;
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void * addr)
{
__u32 mask;
volatile __u32 *a = addr;
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile void * addr)
{
__u32 mask;
volatile __u32 *a = addr;
local_irq_save(flags);
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "r6", "%1")
- LOAD" %0, @%1; \n\t"
+ M32R_LOCK" %0, @%1; \n\t"
"xor %0, %2; \n\t"
- STORE" %0, @%1; \n\t"
+ M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (tmp)
: "r" (a), "r" (mask)
: "memory"
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
local_irq_save(flags);
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "%1", "%2")
- LOAD" %0, @%2; \n\t"
+ M32R_LOCK" %0, @%2; \n\t"
"mv %1, %0; \n\t"
"and %0, %3; \n\t"
"or %1, %3; \n\t"
- STORE" %1, @%2; \n\t"
+ M32R_UNLOCK" %1, @%2; \n\t"
: "=&r" (oldbit), "=&r" (tmp)
: "r" (a), "r" (mask)
: "memory"
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "%1", "%3")
- LOAD" %0, @%3; \n\t"
- "mv %1, %0; \n\t"
- "and %0, %2; \n\t"
- "not %2, %2; \n\t"
- "and %1, %2; \n\t"
- STORE" %1, @%3; \n\t"
+ M32R_LOCK" %0, @%3; \n\t"
+ "mv %1, %0; \n\t"
+ "and %0, %2; \n\t"
+ "not %2, %2; \n\t"
+ "and %1, %2; \n\t"
+ M32R_UNLOCK" %1, @%3; \n\t"
: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
: "r" (a)
: "memory"
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
}
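
/*
 * Illustrative sketch only: the double-underscore variants are not
 * atomic, so concurrent callers must provide their own serialization.
 * The spinlock name bitmap_lock below is invented for the example.
 *
 *	spin_lock(&bitmap_lock);
 *	if (!__test_and_set_bit(nr, bitmap))
 *		... this caller was the first to claim bit nr ...
 *	spin_unlock(&bitmap_lock);
 */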
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
__u32 mask, oldbit;
volatile __u32 *a = addr;
local_irq_save(flags);
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "%1", "%2")
- LOAD" %0, @%2; \n\t"
+ M32R_LOCK" %0, @%2; \n\t"
"mv %1, %0; \n\t"
"and %0, %3; \n\t"
"xor %1, %3; \n\t"
- STORE" %1, @%2; \n\t"
+ M32R_UNLOCK" %1, @%2; \n\t"
: "=&r" (oldbit), "=&r" (tmp)
: "r" (a), "r" (mask)
: "memory"
return (oldbit != 0);
}
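
/*
 * Illustrative sketch only: because test_and_set_bit() returns the
 * previous value of the bit, it can serve as a simple "claim" primitive.
 * The names RESOURCE_BIT and resource_map below are invented.
 *
 *	if (test_and_set_bit(RESOURCE_BIT, &resource_map))
 *		return -EBUSY;		   bit was already set by someone else
 *	... bit was 0 and is now 1: we own the resource ...
 *	clear_bit(RESOURCE_BIT, &resource_map);
 */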
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static int test_bit(int nr, const volatile void * addr);
-#endif
-
-static inline int test_bit(int nr, const volatile void * addr)
+static __inline__ int test_bit(int nr, const volatile void * addr)
{
__u32 mask;
const volatile __u32 *a = addr;
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
-static inline unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
{
int k;
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
-static inline int find_next_zero_bit(void *addr, int size, int offset)
+static __inline__ int find_next_zero_bit(const unsigned long *addr,
+ int size, int offset)
{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ const unsigned long *p = addr + (offset >> 5);
unsigned long result = offset & ~31UL;
unsigned long tmp;
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs(unsigned long word)
+static __inline__ unsigned long __ffs(unsigned long word)
{
int k = 0;
* fls: find last bit set.
*/
#define fls(x) generic_fls(x)
+#define fls64(x) generic_fls64(x)
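
/*
 * Illustrative sketch only, following the generic definitions these
 * helpers fall back to: __ffs() and ffz() are 0-based, fls() is 1-based
 * with fls(0) == 0.
 *
 *	__ffs(0x08) == 3	   first set bit
 *	ffz(0x07)   == 3	   first zero bit
 *	fls(0x08)   == 4	   last set bit, 1-based
 *	fls(0)      == 0
 */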
#ifdef __KERNEL__