/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#endif

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
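
/*
 * Editor's example (not part of the original header): a typical use of
 * the barrier pair is releasing a lock bit, so that stores made inside
 * the critical section become visible to other CPUs before the bit
 * clears.  LOCK_BIT and lock_word below are hypothetical names.
 */
#if 0	/* illustration only */
	smp_mb__before_clear_bit();
	clear_bit(LOCK_BIT, &lock_word);
#endif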

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			local_irq_disable()
#define __bi_save_flags(x)		local_save_flags(x)
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe, and
 * interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: the bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: the bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: the bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: the bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: the bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: the bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* !CONFIG_CPU_HAS_LLSC */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a |= mask;
	__bi_local_irq_restore(flags);
}
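
/*
 * Editor's note: without ll/sc (MIPS ISA 1), atomicity on this CPU is
 * obtained by disabling interrupts around the read-modify-write; the
 * __bi_* wrappers compile away entirely in usermode builds.
 */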

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a &= ~mask;
	__bi_local_irq_restore(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a ^= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: the bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: the bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: the bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: the bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: the bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: the bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#endif /* CONFIG_CPU_HAS_LLSC */

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, volatile void *addr)
{
	return 1UL & (((const volatile unsigned long *) addr)[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
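
/*
 * Editor's note: with a 32-bit long, SZLONG_LOG is 5 and SZLONG_MASK is
 * 31, so test_bit(37, addr) reads word addr[37 >> 5] == addr[1] and
 * extracts bit 37 & 31 == 5 of it.
 */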

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
}
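
/*
 * Worked example (editor's note): ffz(0xffff00ff).  After word = ~word
 * the value is 0x0000ff00.  Step 1: the low 16 bits are non-zero, so
 * nothing is added.  Step 2: the low 8 bits are all zero, so 8 is added
 * and word is shifted right by 8.  The remaining steps add nothing, and
 * ffz() returns 8 -- the index of the first zero bit in 0xffff00ff.
 */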

#ifdef __KERNEL__

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */

#define ffs(x) generic_ffs(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline long find_next_zero_bit(void *addr, unsigned long size,
	unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0UL)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if ((tmp = *(p++)) != ~0UL)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
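
/*
 * Editor's example (not part of the original header): claiming the
 * first free slot in a 128-bit bitmap.  The names are hypothetical;
 * real callers would retry if test_and_set_bit() reported the slot
 * taken in the meantime.
 */
#if 0	/* illustration only */
	unsigned long inuse[4];		/* 4 x 32 = 128 slots */
	long slot = find_first_zero_bit(inuse, 128);
	if (slot < 128)
		set_bit(slot, inuse);
#endif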

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
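
/*
 * Editor's note: the macros expand to the generic population-count
 * helpers from <linux/bitops.h>; for example hweight32(0xf0f0f0f0)
 * is 16 and hweight8(0x83) is 3.
 */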

static __inline__ int __test_and_set_le_bit(int nr, void * addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static __inline__ int __test_and_clear_le_bit(int nr, void * addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static __inline__ int test_le_bit(int nr, const void * addr)
{
	const unsigned char	*ADDR = (const unsigned char *) addr;
	int			mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long ext2_ffz(unsigned int word)
{
	int b = 0, s;

	word = ~word;
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
}

static inline unsigned long find_next_zero_le_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31;
	unsigned int tmp;

	if (offset >= size)
		return size;

	size -= result;
	offset &= 31;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0U >> (32-offset); /* bug or feature ? */
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;

	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
	if (tmp == ~0U)			/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ext2_ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit			__test_and_set_le_bit
#define ext2_clear_bit			__test_and_clear_le_bit
#define ext2_test_bit			test_le_bit
#define ext2_find_first_zero_bit	find_first_zero_le_bit
#define ext2_find_next_zero_bit		find_next_zero_le_bit
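
/*
 * Editor's note: the ext2_* operations act on little-endian bitmaps
 * regardless of CPU byte order, so bit 0 is the LSB of the first byte
 * on both big- and little-endian MIPS.  For example, with
 * unsigned char map[2] = { 0x01, 0x00 }, ext2_test_bit(0, map) is
 * non-zero while ext2_test_bit(8, map) is zero.
 */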

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */