/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

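/*
 * Illustrative note (not from the original source): a bit index @nr is
 * split into a word index and an offset within that word.  With 32-bit
 * longs (SZLONG_LOG == 5, SZLONG_MASK == 31UL), bit 70 lives in word
 * 70 >> 5 == 2 at offset 70 & 31 == 6, so the operations below compute:
 *
 *	unsigned long *word = bitmap + (nr >> SZLONG_LOG);
 *	unsigned long mask = 1UL << (nr & SZLONG_MASK);
 */
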
#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

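/*
 * Illustrative sketch (an addition for clarity, not original to this
 * file): when a bit acts as a lock, pair clear_bit() with the barrier
 * so prior stores are visible before the bit is seen clear:
 *
 *	static inline void example_unlock_bit(volatile unsigned long *w)
 *	{
 *		smp_mb__before_clear_bit();
 *		clear_bit(0, w);
 *	}
 */
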
/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			local_irq_disable()
#define __bi_save_flags(x)		local_save_flags(x)
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
	"1:\t" __LL "\t%0, %1\t\t# set_bit\n\t"
	"or\t%0, %2\n\t"
	__SC "\t%0, %1\n\t"
	"beqz\t%0, 1b"
	: "=&r" (temp), "=m" (*m)
	: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
	"1:\t" __LL "\t%0, %1\t\t# clear_bit\n\t"
	"and\t%0, %2\n\t"
	__SC "\t%0, %1\n\t"
	"beqz\t%0, 1b"
	: "=&r" (temp), "=m" (*m)
	: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
}

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
	"1:\t" __LL "\t%0, %1\t\t# change_bit\n\t"
	"xor\t%0, %2\n\t"
	__SC "\t%0, %1\n\t"
	"beqz\t%0, 1b"
	: "=&r" (temp), "=m" (*m)
	: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
	".set\tnoreorder\t\t# test_and_set_bit\n"
	"1:\t" __LL "\t%0, %1\n\t"
	"or\t%2, %0, %3\n\t"
	__SC "\t%2, %1\n\t"
	"beqz\t%2, 1b\n\t"
	" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
	"sync\n\t"
#endif
	".set\treorder"
	: "=&r" (temp), "=m" (*m), "=&r" (res)
	: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
	: "memory");

	return res != 0;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
	".set\tnoreorder\t\t# test_and_clear_bit\n"
	"1:\t" __LL "\t%0, %1\n\t"
	"or\t%2, %0, %3\n\t"
	"xor\t%2, %3\n\t"
	__SC "\t%2, %1\n\t"
	"beqz\t%2, 1b\n\t"
	" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
	"sync\n\t"
#endif
	".set\treorder"
	: "=&r" (temp), "=m" (*m), "=&r" (res)
	: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
	: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
	".set\tnoreorder\t\t# test_and_change_bit\n"
	"1:\t" __LL "\t%0, %1\n\t"
	"xor\t%2, %0, %3\n\t"
	__SC "\t%2, %1\n\t"
	"beqz\t%2, 1b\n\t"
	" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
	"sync\n\t"
#endif
	".set\treorder"
	: "=&r" (temp), "=m" (*m), "=&r" (res)
	: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
	: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a |= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a &= ~mask;
	__bi_local_irq_restore(flags);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	*a &= ~mask;
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a ^= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#endif /* MIPS I */

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}

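/*
 * Hypothetical usage sketch (not part of the original header): test_bit()
 * indexes past the first word automatically, so bit 65 of a two-word
 * bitmap reads word 1, bit 1 on a 64-bit kernel:
 *
 *	static unsigned long state[2];
 *
 *	if (test_bit(65, state))
 *		do_something();
 */
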
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
#ifdef CONFIG_MIPS32
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_MIPS64
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;
#endif

	return b;
}

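/*
 * Worked example (illustration only, not from the original source):
 * ffz(0x0000ffff) on a 32-bit kernel.  After word = ~word the value is
 * 0xffff0000.  The first step sees word << 16 == 0, meaning the low 16
 * bits hold no set bit, so b becomes 16 and word shifts down to
 * 0x0000ffff.  Every later step finds a set bit in its low half, so s
 * stays 0 and the result is b == 16, the index of the first zero bit
 * of the original value.
 */
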
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return ffz(~word);
}

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

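/*
 * Hypothetical usage sketch (not part of the original header): finding a
 * free slot in a 128-bit allocation map.  Note the find + set pair is
 * not atomic; a real allocator would hold a lock around it.
 *
 *	static unsigned long slot_map[128 / _MIPS_SZLONG];
 *
 *	static int alloc_slot(void)
 *	{
 *		unsigned long n = find_first_zero_bit(slot_map, 128);
 *
 *		if (n >= 128)
 *			return -1;
 *		__set_bit(n, slot_map);
 *		return n;
 *	}
 */
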
/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_MIPS32
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (unlikely(b[3]))
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_MIPS64
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}

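/*
 * Illustration (an assumption about the caller, not from this file): the
 * O(1) scheduler keeps one bit per priority level in a 140-bit bitmap
 * and picks the highest-priority non-empty run queue with:
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	queue = array->queue + idx;
 */
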
/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

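/*
 * Worked example (illustration only): hweight8(0xb5) == 5, because
 * 0xb5 == 10110101 in binary has five bits set.
 */
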
static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
{
	const unsigned char	*ADDR = (const unsigned char *) addr;
	int			mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)

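/*
 * Hypothetical usage sketch (not part of the original header): ext2
 * keeps its on-disk bitmaps little-endian, so allocation code scans and
 * sets bits through these wrappers (bm, nbits and lock are assumed
 * locals here):
 *
 *	unsigned long bit = ext2_find_first_zero_bit(bm, nbits);
 *
 *	if (bit < nbits)
 *		ext2_set_bit_atomic(&lock, bit, bm);
 */
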
/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */