1 #ifndef _ASM_IA64_BITOPS_H
2 #define _ASM_IA64_BITOPS_H
5 * Copyright (C) 1998-2001 Hewlett-Packard Co
6 * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
9 #include <asm/system.h>
12 * set_bit - Atomically set a bit in memory
14 * @addr: the address to start counting from
16 * This function is atomic and may not be reordered. See __set_bit()
17 * if you do not require the atomic guarantees.
18 * Note that @nr may be almost arbitrarily large; this function is not
19 * restricted to acting on a single-word quantity.
21 * The address must be (at least) "long" aligned.
22  * Note that there are drivers (e.g., eepro100) which use these operations to operate on
23 * hw-defined data-structures, so we can't easily change these operations to force a
26 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
/*
 * Atomically set bit @nr in the bitmap at @addr: compute the 32-bit
 * word holding the bit, then retry a compare-and-exchange until no
 * other CPU modified the word between the read and the update.
 * NOTE(review): the declarations of bit/old/new and the "do {" loop
 * head appear elided in this excerpt.
 */
28 static __inline__ void
29 set_bit (int nr, volatile void *addr)
/* nr >> 5 selects the 32-bit word containing bit nr. */
35 	m = (volatile __u32 *) addr + (nr >> 5);
/* cmpxchg_acq: acquire-semantics compare-and-swap; loop until it sticks. */
41 	} while (cmpxchg_acq(m, old, new) != old);
45 * __set_bit - Set a bit in memory
47 * @addr: the address to start counting from
49 * Unlike set_bit(), this function is non-atomic and may be reordered.
50 * If it's called on the same region of memory simultaneously, the effect
51 * may be that only one operation succeeds.
/*
 * Non-atomic variant of set_bit(): plain read-modify-write of the
 * word containing bit @nr.  Callers must serialize concurrent access
 * to the same word themselves.
 */
53 static __inline__ void
54 __set_bit (int nr, volatile void *addr)
/* nr >> 5 picks the 32-bit word; 1 << (nr & 31) is the bit within it. */
56 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
60 * clear_bit() has "acquire" semantics.
/*
 * clear_bit() already has acquire semantics on ia64 (see comment above),
 * so only the "before" barrier needs to emit a real smp_mb(); the
 * "after" barrier can be a no-op.
 */
62 #define smp_mb__before_clear_bit()	smp_mb()
63 #define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
66 * clear_bit - Clears a bit in memory
68 * @addr: Address to start counting from
70 * clear_bit() is atomic and may not be reordered. However, it does
71 * not contain a memory barrier, so if it is used for locking purposes,
72 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
73 * in order to ensure changes are visible on other processors.
/*
 * Atomically clear bit @nr in the bitmap at @addr via a cmpxchg retry
 * loop (AND with the inverted bit mask).
 * NOTE(review): the mask/old/new declarations and the "do {" loop head
 * appear elided in this excerpt.
 */
75 static __inline__ void
76 clear_bit (int nr, volatile void *addr)
/* Select the 32-bit word containing bit nr. */
82 	m = (volatile __u32 *) addr + (nr >> 5);
/* All-ones mask except the target bit. */
83 	mask = ~(1 << (nr & 31));
/* Retry until no other CPU raced with the read-modify-write. */
88 	} while (cmpxchg_acq(m, old, new) != old);
92 * change_bit - Toggle a bit in memory
94 * @addr: Address to start counting from
96 * change_bit() is atomic and may not be reordered.
97 * Note that @nr may be almost arbitrarily large; this function is not
98 * restricted to acting on a single-word quantity.
/*
 * Atomically toggle (XOR) bit @nr in the bitmap at @addr via a cmpxchg
 * retry loop.
 * NOTE(review): interior lines (old/new computation, loop head) appear
 * elided in this excerpt.
 */
100 static __inline__ void
101 change_bit (int nr, volatile void *addr)
/* Debug aid: checks the operand is valid for cmpxchg (macro from intrinsics). */
105 	CMPXCHG_BUGCHECK_DECL
/* Word containing bit nr, and the single-bit mask to toggle. */
107 	m = (volatile __u32 *) addr + (nr >> 5);
108 	bit = (1 << (nr & 31));
113 	} while (cmpxchg_acq(m, old, new) != old);
117 * __change_bit - Toggle a bit in memory
118 * @nr: the bit to set
119 * @addr: the address to start counting from
121 * Unlike change_bit(), this function is non-atomic and may be reordered.
122 * If it's called on the same region of memory simultaneously, the effect
123 * may be that only one operation succeeds.
/*
 * Non-atomic variant of change_bit(): plain XOR of the bit into the
 * containing word.  Caller provides serialization.
 */
125 static __inline__ void
126 __change_bit (int nr, volatile void *addr)
/* Toggle bit (nr & 31) of the word at index (nr >> 5). */
128 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
132 * test_and_set_bit - Set a bit and return its old value
134 * @addr: Address to count from
136 * This operation is atomic and cannot be reordered.
137 * It also implies a memory barrier.
/*
 * Atomically set bit @nr and return its previous value (nonzero if it
 * was already set).  Implemented as a cmpxchg retry loop.
 * NOTE(review): the old/new computation and the "do {" loop head appear
 * elided in this excerpt.
 */
139 static __inline__ int
140 test_and_set_bit (int nr, volatile void *addr)
144 	CMPXCHG_BUGCHECK_DECL
/* Word containing bit nr, and the single-bit mask. */
146 	m = (volatile __u32 *) addr + (nr >> 5);
147 	bit = 1 << (nr & 31);
152 	} while (cmpxchg_acq(m, old, new) != old);
/* "old" is the word value before the update succeeded. */
153 	return (old & bit) != 0;
157 * __test_and_set_bit - Set a bit and return its old value
159 * @addr: Address to count from
161 * This operation is non-atomic and can be reordered.
162 * If two examples of this operation race, one can appear to succeed
163 * but actually fail. You must protect multiple accesses with a lock.
/*
 * Non-atomic test-and-set: record whether bit @nr was set, then set it.
 * Caller must serialize concurrent access.
 * NOTE(review): the store (*p |= m) and the return statement appear
 * elided in this excerpt.
 */
165 static __inline__ int
166 __test_and_set_bit (int nr, volatile void *addr)
168 	__u32 *p = (__u32 *) addr + (nr >> 5);
169 	__u32 m = 1 << (nr & 31);
/* Capture the previous state of the bit as 0/1 before modifying. */
170 	int oldbitset = (*p & m) != 0;
177 * test_and_clear_bit - Clear a bit and return its old value
179 * @addr: Address to count from
181 * This operation is atomic and cannot be reordered.
182 * It also implies a memory barrier.
/*
 * Atomically clear bit @nr and return its previous value (nonzero if
 * it was set).  Implemented as a cmpxchg retry loop with an inverted
 * mask.
 * NOTE(review): the loop head and new-value computation appear elided
 * in this excerpt.
 */
184 static __inline__ int
185 test_and_clear_bit (int nr, volatile void *addr)
187 	__u32 mask, old, new;
189 	CMPXCHG_BUGCHECK_DECL
/* Word containing bit nr; mask has every bit set except the target. */
191 	m = (volatile __u32 *) addr + (nr >> 5);
192 	mask = ~(1 << (nr & 31));
197 	} while (cmpxchg_acq(m, old, new) != old);
/* ~mask recovers the single-bit mask; test it against the pre-update word. */
198 	return (old & ~mask) != 0;
202 * __test_and_clear_bit - Clear a bit and return its old value
204 * @addr: Address to count from
206 * This operation is non-atomic and can be reordered.
207 * If two examples of this operation race, one can appear to succeed
208 * but actually fail. You must protect multiple accesses with a lock.
/*
 * Non-atomic test-and-clear: record whether bit @nr was set, then
 * clear it.  Caller must serialize concurrent access.
 * NOTE(review): the store and return appear elided in this excerpt;
 * also note oldbitset here keeps the raw masked value rather than a
 * normalized 0/1 (callers only test zero/nonzero).
 */
210 static __inline__ int
211 __test_and_clear_bit(int nr, volatile void * addr)
213 	__u32 *p = (__u32 *) addr + (nr >> 5);
214 	__u32 m = 1 << (nr & 31);
215 	int oldbitset = *p & m;
222  * test_and_change_bit - Toggle a bit and return its old value
224 * @addr: Address to count from
226 * This operation is atomic and cannot be reordered.
227 * It also implies a memory barrier.
/*
 * Atomically toggle bit @nr and return its value BEFORE the toggle
 * (nonzero if it was set).  Implemented as a cmpxchg retry loop.
 * NOTE(review): the old/new computation and loop head appear elided in
 * this excerpt.
 */
229 static __inline__ int
230 test_and_change_bit (int nr, volatile void *addr)
234 	CMPXCHG_BUGCHECK_DECL
/* Word containing bit nr, and the single-bit mask to flip. */
236 	m = (volatile __u32 *) addr + (nr >> 5);
237 	bit = (1 << (nr & 31));
242 	} while (cmpxchg_acq(m, old, new) != old);
/* "old" is the pre-toggle word, so this reports the previous bit state. */
243 	return (old & bit) != 0;
247 * WARNING: non atomic version.
/*
 * Non-atomic test-and-change: toggle bit @nr and report its previous
 * state.  Caller must serialize concurrent access.
 * NOTE(review): the lines reading the old word and writing the toggled
 * value appear elided in this excerpt.
 */
249 static __inline__ int
250 __test_and_change_bit (int nr, void *addr)
252 	__u32 old, bit = (1 << (nr & 31));
253 	__u32 *m = (__u32 *) addr + (nr >> 5);
/* Report the bit's state before the toggle. */
257 	return (old & bit) != 0;
/*
 * Return 1 if bit @nr is set in the bitmap at @addr, else 0.
 * Pure read; no atomicity implied beyond the 32-bit load.
 */
260 static __inline__ int
261 test_bit (int nr, volatile void *addr)
/* Shift the target bit to position 0 and mask everything else off. */
263 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
267 * ffz - find the first zero bit in a memory region
268 * @x: The address to start the search at
270 * Returns the bit-number (0..63) of the first (least significant) zero bit, not
271 * the number of the byte containing a bit. Undefined if no zero exists, so
272 * code should check against ~0UL first...
/*
 * Find the bit number (0..63) of the least significant ZERO bit of @x.
 * x & (~x - 1) produces a mask of exactly the 1-bits below the first
 * zero bit, so popcnt of it equals that bit's index.  Undefined for
 * x == ~0UL (no zero bit).
 * NOTE(review): the "return result;" line appears elided in this
 * excerpt.
 */
274 static inline unsigned long
275 ffz (unsigned long x)
277 	unsigned long result;
/* ia64 popcnt instruction counts the set bits of the mask. */
279 	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
</asm
286  * ia64_fls - find the position of the most significant set bit in a 64-bit quantity
287  * @x: The value to search
/*
 * ia64_fls: position of the most significant set bit of @x, computed
 * by converting to floating point and extracting the exponent with
 * getf.exp.  NOTE(review): the conversion of x to the FP register
 * operand "d", the exponent bias adjustment, and the return appear
 * elided in this excerpt — confirm against the full source.
 */
289 static inline unsigned long
290 ia64_fls (unsigned long x)
/* getf.exp reads the (biased) exponent field of the FP value, i.e. floor(log2). */
295 	__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
300 * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
301 * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
302 * "int" values only and the result value is the bit number + 1. ffs(0) is defined to
/* Delegate to the compiler builtin: result is bit index + 1, ffs(0) == 0. */
305 #define ffs(x)	__builtin_ffs(x)
308 * hweightN: returns the hamming weight (i.e. the number
309 * of bits set) of a N-bit word
/*
 * Population count (Hamming weight) of a 64-bit value using the ia64
 * popcnt instruction.
 * NOTE(review): the "return result;" line appears elided in this
 * excerpt.
 */
311 static __inline__ unsigned long
312 hweight64 (unsigned long x)
314 	unsigned long result;
315 	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
/* Narrower weights reuse hweight64 after masking off the high bits. */
319 #define hweight32(x) hweight64 ((x) & 0xfffffffful)
320 #define hweight16(x) hweight64 ((x) & 0xfffful)
321 #define hweight8(x)  hweight64 ((x) & 0xfful)
323 #endif /* __KERNEL__ */
326 * Find next zero bit in a bitmap reasonably efficiently..
/*
 * Scan the bitmap at @addr (of @size bits) for the first zero bit at or
 * after @offset; returns @size if none is found before the end.
 * Operates on 64-bit words (offset >> 6 / mask of 63).
 * NOTE(review): many interior lines (the partial-first-word handling,
 * the word-scan loop body, and the tail handling) appear elided in
 * this excerpt — the visible lines are only the skeleton.
 */
328 static inline unsigned long
329 find_next_zero_bit (void *addr, unsigned long size, unsigned long offset)
/* Start at the 64-bit word containing "offset"; result tracks its bit base. */
331 	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
332 	unsigned long result = offset & ~63UL;
/* Force the bits below "offset" to 1 so they are ignored by ffz. */
341 		tmp |= ~0UL >> (64-offset);
/* Walk whole 64-bit words while at least one full word remains. */
349 	while (size & ~63UL) {
360 	if (tmp == ~0UL)	/* any bits zero? */
361 		return result + size;	/* nope */
363 	return result + ffz(tmp);
367 * The optimizer actually does good code for this case..
/* Searching from bit 0 is just find_next_zero_bit with offset 0. */
369 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
/*
 * ext2 filesystem bitmap helpers: on ia64 (little-endian bit order in
 * 32-bit words matches ext2's on-disk layout) these map directly onto
 * the generic bit operations above.
 */
373 #define ext2_set_bit                 test_and_set_bit
374 #define ext2_clear_bit               test_and_clear_bit
375 #define ext2_test_bit                test_bit
376 #define ext2_find_first_zero_bit     find_first_zero_bit
377 #define ext2_find_next_zero_bit      find_next_zero_bit
379 /* Bitmap functions for the minix filesystem. */
/* Like the ext2 helpers, these are thin aliases for the generic ops. */
380 #define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
381 #define minix_set_bit(nr,addr)			set_bit(nr,addr)
382 #define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
383 #define minix_test_bit(nr,addr)			test_bit(nr,addr)
384 #define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
386 #endif /* __KERNEL__ */
388 #endif /* _ASM_IA64_BITOPS_H */