5 * include/asm-s390/bitops.h
8 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
11 * Derived from "include/asm-i386/bitops.h"
12 * Copyright (C) 1992, Linus Torvalds
15 #include <linux/config.h>
18 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
19 * bit 32 is the LSB of *(addr+4). That combined with the
20 * big endian byte order on S390 give the following bit
22 * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
23 * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
24 * after that follows the next long with bit numbers
25 * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
26 * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
27 * The reason for this bit ordering is the fact that
28 * in the architecture independent code bits operations
29 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
30 * with operation of the form "set_bit(bitnr, flags)".
33 /* set ALIGN_CS to 1 if the SMP safe bit operations should
34 * align the address to 4 byte boundary. It seems to work
35 * without the alignment.
42 #error "bitops won't work without CONFIG_SMP"
46 /* bitmap tables from arch/s390/kernel/bitmap.S */
/* _oi_bitmap / _ni_bitmap: per-bit immediate masks used by the OI/NI based
 * routines below; _zb_findmap: byte-value -> index-of-first-zero-bit table
 * used by the find/ffz routines.
 * NOTE(review): semantics inferred from the uses below — confirm against
 * arch/s390/kernel/bitmap.S. */
47 extern const char _oi_bitmap[];
48 extern const char _ni_bitmap[];
49 extern const char _zb_findmap[];
53 * SMP safe set_bit routine based on compare and swap (CS)
/* NOTE(review): fragmentary excerpt — the function braces and the tail of
 * the asm (the "cs" instruction and the retry branch back to label 0:) are
 * not visible here; do not edit without the complete file. */
55 static __inline__ void set_bit_cs(int nr, volatile void * addr)
57 unsigned long bits, mask;
60 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
61 " nr %2,%1\n" /* isolate last 2 bits of address */
62 " xr %1,%2\n" /* make addr % 4 == 0 */
64 " ar %0,%2\n" /* add alignment to bitnr */
67 " nr %2,%0\n" /* make shift value */
71 " la %1,0(%0,%1)\n" /* calc. address for CS */
72 " sll %3,0(%2)\n" /* make OR mask */
74 "0: lr %2,%0\n" /* CS loop starts here */
75 " or %2,%3\n" /* set bit */
78 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
83 * SMP safe clear_bit routine based on compare and swap (CS)
/* NOTE(review): fragmentary excerpt — the "cs" retry loop and closing lines
 * are missing from this view. The AND mask is built as ~(1 << bit) by
 * XOR-ing the shifted bit with the in-memory -1 (operand %4). */
85 static __inline__ void clear_bit_cs(int nr, volatile void * addr)
87 static const int minusone = -1;
88 unsigned long bits, mask;
91 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
92 " nr %2,%1\n" /* isolate last 2 bits of address */
93 " xr %1,%2\n" /* make addr % 4 == 0 */
95 " ar %0,%2\n" /* add alignment to bitnr */
98 " nr %2,%0\n" /* make shift value */
102 " la %1,0(%0,%1)\n" /* calc. address for CS */
104 " x %3,%4\n" /* make AND mask */
106 "0: lr %2,%0\n" /* CS loop starts here */
107 " nr %2,%3\n" /* clear bit */
110 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
111 : "m" (minusone) : "cc", "memory" );
115 * SMP safe change_bit routine based on compare and swap (CS)
/* NOTE(review): fragmentary excerpt — the "cs" instruction and retry branch
 * are not visible here. */
117 static __inline__ void change_bit_cs(int nr, volatile void * addr)
119 unsigned long bits, mask;
120 __asm__ __volatile__(
122 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
123 " nr %2,%1\n" /* isolate last 2 bits of address */
124 " xr %1,%2\n" /* make addr % 4 == 0 */
126 " ar %0,%2\n" /* add alignment to bitnr */
129 " nr %2,%0\n" /* make shift value */
133 " la %1,0(%0,%1)\n" /* calc. address for CS */
134 " sll %3,0(%2)\n" /* make XR mask */
136 "0: lr %2,%0\n" /* CS loop starts here */
137 " xr %2,%3\n" /* change bit */
140 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
145 * SMP safe test_and_set_bit routine based on compare and swap (CS)
/* Returns the previous value of the bit (old word AND mask is left in %0).
 * NOTE(review): fragmentary excerpt — the "cs" retry loop and the return
 * statement are not visible here. */
147 static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
149 unsigned long bits, mask;
150 __asm__ __volatile__(
152 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
153 " nr %2,%1\n" /* isolate last 2 bits of address */
154 " xr %1,%2\n" /* make addr % 4 == 0 */
156 " ar %0,%2\n" /* add alignment to bitnr */
159 " nr %2,%0\n" /* make shift value */
163 " la %1,0(%0,%1)\n" /* calc. address for CS */
164 " sll %3,0(%2)\n" /* make OR mask */
166 "0: lr %2,%0\n" /* CS loop starts here */
167 " or %2,%3\n" /* set bit */
170 " nr %0,%3\n" /* isolate old bit */
171 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
177 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
/* Returns the previous value of the bit.
 * NOTE(review): fragmentary excerpt — the "cs" retry loop and the return
 * statement are not visible here. */
179 static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
181 static const int minusone = -1;
182 unsigned long bits, mask;
183 __asm__ __volatile__(
185 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
186 " nr %2,%1\n" /* isolate last 2 bits of address */
187 " xr %1,%2\n" /* make addr % 4 == 0 */
189 " ar %0,%2\n" /* add alignment to bitnr */
192 " nr %2,%0\n" /* make shift value */
196 " la %1,0(%0,%1)\n" /* calc. address for CS */
199 " x %3,%4\n" /* make AND mask */
200 "0: lr %2,%0\n" /* CS loop starts here */
201 " nr %2,%3\n" /* clear bit */
205 " nr %0,%3\n" /* isolate old bit */
206 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
207 : "m" (minusone) : "cc", "memory" );
212 * SMP safe test_and_change_bit routine based on compare and swap (CS)
/* Returns the previous value of the bit.
 * NOTE(review): fragmentary excerpt — the "cs" retry loop and the return
 * statement are not visible here. */
214 static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
216 unsigned long bits, mask;
217 __asm__ __volatile__(
219 " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
220 " nr %2,%1\n" /* isolate last 2 bits of address */
221 " xr %1,%2\n" /* make addr % 4 == 0 */
223 " ar %0,%2\n" /* add alignment to bitnr */
226 " nr %2,%0\n" /* make shift value */
230 " la %1,0(%0,%1)\n" /* calc. address for CS */
231 " sll %3,0(%2)\n" /* make XR mask */
233 "0: lr %2,%0\n" /* CS loop starts here */
234 " xr %2,%3\n" /* change bit */
237 " nr %0,%3\n" /* isolate old bit */
238 : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
242 #endif /* CONFIG_SMP */
245 * fast, non-SMP set_bit routine
/* Non-interlocked set_bit using the _oi_bitmap mask table.
 * NOTE(review): fragmentary excerpt — the asm template lines between the
 * opening __asm__ and the constraint lists are not visible here. */
247 static __inline__ void __set_bit(int nr, volatile void * addr)
249 unsigned long reg1, reg2;
250 __asm__ __volatile__(
259 : "=&a" (reg1), "=&a" (reg2)
260 : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
263 static __inline__ void
264 __constant_set_bit(const int nr, volatile void * addr)
/* Compile-time-constant variant: each case addresses byte ((nr>>3)^3); the
 * ^3 converts the word-relative byte index to the big-endian byte layout
 * described in the header comment of this file.
 * NOTE(review): fragmentary excerpt — the switch on the bit-in-byte value
 * and the or-immediate instructions between "la 1,%0" and the constraint
 * lines are not visible here; presumably one case per bit 0..7. */
268 __asm__ __volatile__ ("la 1,%0\n\t"
270 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
271 : : "1", "cc", "memory");
274 __asm__ __volatile__ ("la 1,%0\n\t"
276 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
277 : : "1", "cc", "memory" );
280 __asm__ __volatile__ ("la 1,%0\n\t"
282 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
283 : : "1", "cc", "memory" );
286 __asm__ __volatile__ ("la 1,%0\n\t"
288 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
289 : : "1", "cc", "memory" );
292 __asm__ __volatile__ ("la 1,%0\n\t"
294 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
295 : : "1", "cc", "memory" );
298 __asm__ __volatile__ ("la 1,%0\n\t"
300 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
301 : : "1", "cc", "memory" );
304 __asm__ __volatile__ ("la 1,%0\n\t"
306 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
307 : : "1", "cc", "memory" );
310 __asm__ __volatile__ ("la 1,%0\n\t"
312 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
313 : : "1", "cc", "memory" );
/* Dispatch: constant nr -> unrolled immediate form, else generic routine. */
318 #define set_bit_simple(nr,addr) \
319 (__builtin_constant_p((nr)) ? \
320 __constant_set_bit((nr),(addr)) : \
321 __set_bit((nr),(addr)) )
324 * fast, non-SMP clear_bit routine
/* Non-interlocked clear_bit using the _ni_bitmap (and-immediate) table.
 * NOTE(review): fragmentary excerpt — asm template lines not visible. */
326 static __inline__ void
327 __clear_bit(int nr, volatile void * addr)
329 unsigned long reg1, reg2;
330 __asm__ __volatile__(
339 : "=&a" (reg1), "=&a" (reg2)
340 : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
343 static __inline__ void
344 __constant_clear_bit(const int nr, volatile void * addr)
/* Compile-time-constant clear_bit; same byte addressing as
 * __constant_set_bit. NOTE(review): fragmentary excerpt — the switch and
 * the and-immediate instructions are not visible here. */
348 __asm__ __volatile__ ("la 1,%0\n\t"
350 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
351 : : "1", "cc", "memory" );
354 __asm__ __volatile__ ("la 1,%0\n\t"
356 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
357 : : "1", "cc", "memory" );
360 __asm__ __volatile__ ("la 1,%0\n\t"
362 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
363 : : "1", "cc", "memory" );
366 __asm__ __volatile__ ("la 1,%0\n\t"
368 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
369 : : "1", "cc", "memory" );
/* NOTE(review): the next case omits the "1" clobber that every sibling
 * case declares, even though "la 1,%0" writes register 1 — looks like a
 * bug; confirm against the full file. */
372 __asm__ __volatile__ ("la 1,%0\n\t"
374 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
375 : : "cc", "memory" );
378 __asm__ __volatile__ ("la 1,%0\n\t"
380 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
381 : : "1", "cc", "memory" );
384 __asm__ __volatile__ ("la 1,%0\n\t"
386 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
387 : : "1", "cc", "memory" );
390 __asm__ __volatile__ ("la 1,%0\n\t"
392 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
393 : : "1", "cc", "memory" );
/* Dispatch: constant nr -> unrolled immediate form, else generic routine. */
398 #define clear_bit_simple(nr,addr) \
399 (__builtin_constant_p((nr)) ? \
400 __constant_clear_bit((nr),(addr)) : \
401 __clear_bit((nr),(addr)) )
404 * fast, non-SMP change_bit routine
/* Non-interlocked change_bit; uses the _oi_bitmap mask table (the same
 * per-bit masks serve for XOR). NOTE(review): fragmentary excerpt — asm
 * template lines not visible. */
406 static __inline__ void __change_bit(int nr, volatile void * addr)
408 unsigned long reg1, reg2;
409 __asm__ __volatile__(
418 : "=&a" (reg1), "=&a" (reg2)
419 : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
422 static __inline__ void
423 __constant_change_bit(const int nr, volatile void * addr)
/* Compile-time-constant change_bit; same byte addressing as
 * __constant_set_bit. NOTE(review): fragmentary excerpt — the switch and
 * the xor-immediate instructions are not visible here. Also note: the
 * first five cases below omit the "1" clobber although "la 1,%0" writes
 * register 1, while the last three declare it — the inconsistency looks
 * like a bug; confirm against the full file. */
427 __asm__ __volatile__ ("la 1,%0\n\t"
429 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
430 : : "cc", "memory" );
433 __asm__ __volatile__ ("la 1,%0\n\t"
435 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
436 : : "cc", "memory" );
439 __asm__ __volatile__ ("la 1,%0\n\t"
441 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
442 : : "cc", "memory" );
445 __asm__ __volatile__ ("la 1,%0\n\t"
447 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
448 : : "cc", "memory" );
451 __asm__ __volatile__ ("la 1,%0\n\t"
453 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
454 : : "cc", "memory" );
457 __asm__ __volatile__ ("la 1,%0\n\t"
459 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
460 : : "1", "cc", "memory" );
463 __asm__ __volatile__ ("la 1,%0\n\t"
465 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
466 : : "1", "cc", "memory" );
469 __asm__ __volatile__ ("la 1,%0\n\t"
471 : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
472 : : "1", "cc", "memory" );
/* Dispatch: constant nr -> unrolled immediate form, else generic routine. */
477 #define change_bit_simple(nr,addr) \
478 (__builtin_constant_p((nr)) ? \
479 __constant_change_bit((nr),(addr)) : \
480 __change_bit((nr),(addr)) )
483 * fast, non-SMP test_and_set_bit routine
/* Returns the previous value of the bit; not interlocked.
 * NOTE(review): fragmentary excerpt — the declaration of oldbit, the asm
 * template lines and the return statement are not visible here. */
485 static __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
487 unsigned long reg1, reg2;
489 __asm__ __volatile__(
500 : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
501 : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
504 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
507 * fast, non-SMP test_and_clear_bit routine
/* Returns the previous value of the bit; not interlocked.
 * NOTE(review): fragmentary excerpt — the declaration of oldbit, the asm
 * template lines and the return statement are not visible here. */
509 static __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
511 unsigned long reg1, reg2;
514 __asm__ __volatile__(
525 : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
526 : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
529 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
532 * fast, non-SMP test_and_change_bit routine
/* Returns the previous value of the bit; not interlocked.
 * NOTE(review): fragmentary excerpt — the declaration of oldbit, the asm
 * template lines and the return statement are not visible here. */
534 static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
536 unsigned long reg1, reg2;
539 __asm__ __volatile__(
550 : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
551 : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
554 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
/* Public names: _cs variants are interlocked (compare-and-swap based),
 * _simple variants are not. NOTE(review): the #ifdef CONFIG_SMP / #else /
 * #endif lines selecting between the two groups are not visible in this
 * excerpt — confirm against the full file. */
557 #define set_bit set_bit_cs
558 #define clear_bit clear_bit_cs
559 #define change_bit change_bit_cs
560 #define test_and_set_bit test_and_set_bit_cs
561 #define test_and_clear_bit test_and_clear_bit_cs
562 #define test_and_change_bit test_and_change_bit_cs
564 #define set_bit set_bit_simple
565 #define clear_bit clear_bit_simple
566 #define change_bit change_bit_simple
567 #define test_and_set_bit test_and_set_bit_simple
568 #define test_and_clear_bit test_and_clear_bit_simple
569 #define test_and_change_bit test_and_change_bit_simple
574 * This routine doesn't need to be atomic.
/* __test_bit: runtime-nr variant (asm); returns nonzero iff the bit is set.
 * NOTE(review): fragmentary excerpt — the declaration of oldbit, the asm
 * template lines and the return statement are not visible here. */
577 static __inline__ int __test_bit(int nr, volatile void * addr)
579 unsigned long reg1, reg2;
582 __asm__ __volatile__(
590 : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
591 : "r" (nr), "a" (addr) : "cc" );
/* Constant-nr variant: plain C byte access, with ^3 mapping the byte index
 * to the big-endian layout described in the file header comment. */
595 static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
596 return (((volatile char *) addr)[(nr>>3)^3] & (1<<(nr&7))) != 0;
599 #define test_bit(nr,addr) \
600 (__builtin_constant_p((nr)) ? \
601 __constant_test_bit((nr),(addr)) : \
602 __test_bit((nr),(addr)) )
605 * Find-bit routines..
/* Returns the index of the first zero bit, or size if none found (clamped
 * by the final return). Uses the _zb_findmap lookup table.
 * NOTE(review): fragmentary excerpt — the declaration of res and the asm
 * body are not visible here. */
607 static __inline__ int find_first_zero_bit(void * addr, unsigned size)
609 unsigned long cmp, count;
614 __asm__(" lhi %1,-1\n"
640 : "=&a" (res), "=&d" (cmp), "=&a" (count)
641 : "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
642 return (res < size) ? res : size;
/* Find the first zero bit at or after `offset`: scan the remainder of the
 * word containing offset first, then fall through to find_first_zero_bit
 * for the following full words.
 * NOTE(review): fragmentary excerpt — the braces, the asm body of the
 * first-word scan, and the body of the "if (set < (32 - bit))" early
 * return are not visible here. */
645 static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
647 unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
648 unsigned long bitvec, reg;
649 int set, bit = offset & 31, res;
653 * Look for zero in first word
655 bitvec = (*p) >> bit;
656 __asm__(" slr %0,%0\n"
669 : "=&d" (set), "+a" (bitvec), "=&d" (reg)
670 : "a" (&_zb_findmap) : "cc" );
671 if (set < (32 - bit))
677 * No zero yet, search remaining full words for a zero
679 res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
680 return (offset + res);
684 * ffz = Find First Zero in word. Undefined if no zero exists,
685 * so code should check against ~0UL first..
/* NOTE(review): fragmentary excerpt — the declarations of result/reg, the
 * asm body and the return statement are not visible here. */
687 static __inline__ unsigned long ffz(unsigned long word)
692 __asm__(" slr %0,%0\n"
705 : "=&d" (result), "+a" (word), "=&d" (reg)
706 : "a" (&_zb_findmap) : "cc" );
711 * ffs: find first bit set. This is defined the same way as
712 * the libc and compiler builtin ffs routines, therefore
713 * differs in spirit from the above ffz (man ffs).
/* NOTE(review): "extern __inline__" carries old GNU C89 inline semantics
 * (no out-of-line definition is emitted). Fragmentary excerpt — the asm
 * body and the return statement are not visible here. */
716 extern int __inline__ ffs (int x)
722 __asm__(" slr %0,%0\n"
743 : "=&d" (r), "+d" (x) : : "cc" );
748 * hweightN: returns the hamming weight (i.e. the number
749 * of bits set) of a N-bit word
/* Delegates to the generic C population-count helpers; presumably provided
 * by the architecture-independent bitops header (not visible here). */
752 #define hweight32(x) generic_hweight32(x)
753 #define hweight16(x) generic_hweight16(x)
754 #define hweight8(x) generic_hweight8(x)
760 * ATTENTION: intel byte ordering convention for ext2 and minix !!
761 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
762 * bit 32 is the LSB of (addr+4).
763 * That combined with the little endian byte order of Intel gives the
764 * following bit order in memory:
765 * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
766 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
/* ext2 bitmaps use little-endian (Intel) bit numbering; XOR-ing the bit
 * number with 24 reverses the byte order within each 32-bit word while
 * keeping the bit position inside the byte, mapping ext2 numbering onto
 * the native big-endian layout described above. */
769 #define ext2_set_bit(nr, addr) test_and_set_bit((nr)^24, addr)
770 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^24, addr)
771 #define ext2_test_bit(nr, addr) test_bit((nr)^24, addr)
/* Like find_first_zero_bit, but for little-endian (ext2) bit numbering.
 * Returns size if no zero bit is found.
 * NOTE(review): fragmentary excerpt — the declaration of res and most of
 * the asm body are not visible here. */
772 static __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
774 unsigned long cmp, count;
779 __asm__(" lhi %1,-1\n"
784 "0: cl %1,0(%0,%4)\n"
806 : "=&a" (res), "=&d" (cmp), "=&a" (count)
807 : "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
808 return (res < size) ? res : size;
/* Like find_next_zero_bit, but for little-endian (ext2) bit numbering:
 * scan the partial first word byte-wise ("ic" loads one byte), then fall
 * through to ext2_find_first_zero_bit for the remaining full words.
 * NOTE(review): fragmentary excerpt — the braces, the asm bodies, and the
 * condition guarding the first return are not visible here. */
811 static __inline__ int
812 ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
814 unsigned long *addr = vaddr;
815 unsigned long *p = addr + (offset >> 5);
816 unsigned long word, reg;
817 int bit = offset & 31UL, res;
823 __asm__(" ic %0,0(%1)\n"
827 : "=&a" (word) : "a" (p) : "cc" );
830 /* Look for zero in first longword */
831 __asm__(" lhi %2,0xff\n"
843 : "+&d" (res), "+&a" (word), "=&d" (reg)
844 : "a" (&_zb_findmap) : "cc" );
846 return (p - addr)*32 + res;
849 /* No zero yet, search remaining full bytes for a zero */
850 res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
851 return (p - addr) * 32 + res;
854 /* Bitmap functions for the minix filesystem. */
/* Minix bitmaps simply reuse the native bit numbering — each macro maps
 * directly onto the corresponding native operation. */
856 #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
857 #define minix_set_bit(nr,addr) set_bit(nr,addr)
858 #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
859 #define minix_test_bit(nr,addr) test_bit(nr,addr)
860 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
862 #endif /* __KERNEL__ */
864 #endif /* _S390_BITOPS_H */