#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
/*
 * Generic ffs(): one plus the index of the least significant set bit,
 * or zero if no bit is set.
 */
static inline int ffs(int x)
{
        int r = 1;

        if (!x)
                return 0;
        if (!(x & 0xffff)) { x >>= 16; r += 16; }
        if (!(x & 0xff))   { x >>= 8;  r += 8;  }
        if (!(x & 0xf))    { x >>= 4;  r += 4;  }
        if (!(x & 3))      { x >>= 2;  r += 2;  }
        if (!(x & 1))      { x >>= 1;  r += 1;  }
        return r;
}
/*
 * __ffs(): index of the least significant set bit; the result for
 * x == 0 is not meaningful.
 */
static inline int __ffs(int x)
{
        int r = 0;

        if (!x)
                return 0;
        if (!(x & 0xffff)) { x >>= 16; r += 16; }
        if (!(x & 0xff))   { x >>= 8;  r += 8;  }
        if (!(x & 0xf))    { x >>= 4;  r += 4;  }
        if (!(x & 3))      { x >>= 2;  r += 2;  }
        if (!(x & 1))      { x >>= 1;  r += 1;  }
        return r;
}
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
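/*
 * Illustrative sketch (not part of the original header): the scheduler's
 * priority map is five 32-bit words covering 140 priorities; the helper
 * below and its made-up bitmap contents only demonstrate how the word
 * index and the bit index combine.
 */
static inline int __sched_find_first_bit_example(void)
{
        unsigned long prio_map[5] = { 0, 0, 0, 0x10, 0 };

        /* bit 4 of word 3 is set, so this returns 96 + 4 = 100 */
        return sched_find_first_bit(prio_map);
}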
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result = 0;

        while (word & 1) {
                result++;
                word >>= 1;
        }
        return result;
}
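/*
 * Illustrative sketch (not part of the original header): callers are
 * expected to reject an all-ones word before using ffz(), since the
 * result is undefined in that case.  The helper name is made up.
 */
static __inline__ int __ffz_checked_example(unsigned long word)
{
        if (word == ~0UL)
                return -1;	/* no zero bit to report */
        return ffz(word);
}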
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a |= mask;
        local_irq_restore(flags);
}
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a &= ~mask;
        local_irq_restore(flags);
}
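/*
 * Illustrative sketch (not part of the original header): because clear_bit()
 * provides no compiler barrier, code that publishes data and then drops a
 * "busy" bit inserts smp_mb__before_clear_bit() in between.  The structure,
 * field names and bit number are made up for the example.
 */
struct __bitops_example_unit {
        int result;
        unsigned long status;		/* bit 0 acts as a busy flag */
};

static __inline__ void __bitops_example_finish(struct __bitops_example_unit *u, int res)
{
        u->result = res;		/* publish the result... */
        smp_mb__before_clear_bit();	/* ...and keep it ordered before the clear */
        clear_bit(0, &u->status);	/* now drop the busy flag */
}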
static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a &= ~mask;
}
static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
        int mask;
        unsigned long flags;
        unsigned long *ADDR = (unsigned long *)addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        local_irq_save(flags);
        *ADDR ^= mask;
        local_irq_restore(flags);
}
static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
        int mask;
        unsigned long *ADDR = (unsigned long *)addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        *ADDR ^= mask;
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        local_irq_restore(flags);

        return retval;
}
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        local_irq_restore(flags);

        return retval;
}
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}
static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        local_irq_restore(flags);

        return retval;
}
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long *addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}
static __inline__ int __test_bit(int nr, const volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *a) != 0);
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *)addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
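/*
 * Illustrative sketch (not part of the original header): a typical caller
 * scans an allocation bitmap for a free slot.  The helper name and the
 * 64-slot size are made up for the example.
 */
static __inline__ int __find_free_slot_example(unsigned long *slot_map)
{
        int bit = find_first_zero_bit(slot_map, 64);

        return (bit >= 64) ? -1 : bit;	/* -1 means every slot is in use */
}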
/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *)addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)			/* Are any bits set? */
                return result + size;	/* Nope. */
found_middle:
        return result + __ffs(tmp);
}
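/*
 * Illustrative sketch (not part of the original header): walking every set
 * bit of a bitmap by restarting find_next_bit() just past the previous hit.
 * The helper name and the 96-bit size are made up for the example.
 */
static __inline__ int __count_set_bits_example(const unsigned long *map)
{
        unsigned long bit;
        int count = 0;

        for (bit = find_next_bit(map, 96, 0); bit < 96;
             bit = find_next_bit(map, 96, bit + 1))
                count++;
        return count;
}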
/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
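/*
 * For example, hweight32(0xF0F00003) evaluates to 10; the generic_hweight*()
 * helpers from <linux/bitops.h> simply count the set bits of the word.
 */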
static __inline__ int ext2_set_bit(int nr, volatile void *addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        local_irq_restore(flags);
        return retval;
}
static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        local_irq_restore(flags);
        return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr)		\
        ({						\
                int ret;				\
                spin_lock(lock);			\
                ret = ext2_set_bit((nr), (addr));	\
                spin_unlock(lock);			\
                ret;					\
        })

#define ext2_clear_bit_atomic(lock, nr, addr)		\
        ({						\
                int ret;				\
                spin_lock(lock);			\
                ret = ext2_clear_bit((nr), (addr));	\
                spin_unlock(lock);			\
                ret;					\
        })
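/*
 * Illustrative usage (not part of the original header): the caller supplies
 * the spinlock that protects the on-disk block bitmap, e.g.
 *
 *	if (!ext2_set_bit_atomic(&bitmap_lock, bit, bitmap_data))
 *		... the bit was previously clear and now belongs to us ...
 *
 * where bitmap_lock and bitmap_data are made-up names for the example.
 */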
static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
{
        int mask;
        const volatile unsigned char *ADDR = (const unsigned char *)addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *)addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
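/*
 * With the generic implementation, fls(0) == 0, fls(1) == 1 and
 * fls(0x80000000) == 32.
 */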
#endif /* _M68KNOMMU_BITOPS_H */