#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
/*
 * Generic ffs(): returns one plus the index of the least significant
 * set bit, or zero if no bits are set. (Portable fallback body; no
 * m68k-specific instructions assumed.)
 */
static inline int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	while (!(x & 1)) {
		x >>= 1;
		r++;
	}
	return r;
}
/*
 * __ffs(): like ffs(), but returns the bit index itself. Undefined
 * for a zero argument; this fallback returns 0 in that case.
 */
static inline int __ffs(int x)
{
	int r = 0;

	if (!x)
		return 0;
	while (!(x & 1)) {
		x >>= 1;
		r++;
	}
	return r;
}
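/*
 * Example (illustrative only): ffs() counts from 1, __ffs() from 0,
 * so for the same argument:
 *
 *	ffs(0x8)   == 4
 *	__ffs(0x8) == 3
 *	ffs(0)     == 0, while __ffs(0) is not meaningful to callers.
 */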
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
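/*
 * Example (illustrative, not part of this header): the O(1) scheduler
 * keeps one bit per priority level in a five-word bitmap (160 bits,
 * of which 140 are used), so finding the highest-priority non-empty
 * runqueue is a single call:
 *
 *	unsigned long prio_bitmap[5];
 *	int prio = sched_find_first_bit(prio_bitmap);
 */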
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
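/*
 * Example (illustrative only): guard against the undefined all-ones
 * case before calling ffz():
 *
 *	if (word != ~0UL)
 *		free_slot = ffz(word);
 *	else
 *		(no zero bit in this word, move to the next one)
 */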
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}
static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
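/*
 * The double-underscore variants skip the irq save/restore, so they
 * are only safe when nothing else can touch the word concurrently,
 * e.g. while initialising a private bitmap:
 *
 *	unsigned long map[4] = { 0, };
 *	__set_bit(0, map);	(setup path: no other users yet)
 *	set_bit(1, map);	(shared at runtime: atomic form)
 */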
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
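/*
 * Example (illustrative only): when clear_bit() releases a flag that
 * other code tests, order the preceding stores explicitly:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_BUSY_BIT, &state);	(bit name is hypothetical)
 */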
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}
static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}
static __inline__ void __change_bit(int nr, volatile unsigned long * addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}
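/*
 * Example (illustrative only): the returned old value makes a
 * claim-once flag (bit number and variable are hypothetical):
 *
 *	if (!test_and_set_bit(0, &claimed))
 *		(we were first: perform the one-time setup)
 */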
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int __test_bit(int nr, const unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
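/*
 * With a compile-time constant nr the conditional above folds away and
 * only __constant_test_bit() is emitted; a variable nr takes the
 * __test_bit() path. Both are plain loads; neither is atomic.
 *
 *	test_bit(5, map);	(constant: resolved at compile time)
 *	test_bit(i, map);	(variable: runtime shift and mask)
 */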
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
static __inline__ int find_next_zero_bit(void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* Partial first word: treat the bits below offset as
		 * busy by forcing them to one. */
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Whole words: any word that is not all ones has a zero. */
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* Mask off bits beyond the requested size. */
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
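/*
 * Example (illustrative only): scan a bitmap for a free slot; the
 * "not found" return value equals size (NBITS is hypothetical):
 *
 *	int bit = find_first_zero_bit(map, NBITS);
 *	if (bit < NBITS)
 *		(bit is free)
 */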
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
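/*
 * Example: hweight32(0xF0F0F0F0) == 16 and hweight8(0x0F) == 4.
 */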
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}
static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
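/*
 * Example (illustrative only): callers pass the spinlock that guards
 * the on-disk bitmap (names are hypothetical):
 *
 *	int was_set = ext2_set_bit_atomic(&sbi->bitmap_lock, bit, bitmap);
 */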
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		/* "Not all bits set" needs no swab: ~tmp is zero in
		 * either byte order exactly when tmp is all ones. */
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
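/*
 * Worked example (illustrative only): for offset == 8,
 * ~0UL >> (32-8) is 0x000000ff, the "first eight ext2 bits" mask in
 * swabbed (CPU) bit order. __swab32() turns it into 0xff000000, which
 * covers the same on-disk bits in the raw value held in tmp, so the
 * word itself never needs swapping on the fast path.
 */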
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */

#endif /* _M68KNOMMU_BITOPS_H */