X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Fasm-s390%2Fbitops.h;h=0ddcdba79e4a91b6b04b140810994f31bd5636a8;hb=dd990f16a39d4e615c0b70a0ab50b79b32bfb16d;hp=3628899f48bb34c45a17a45120e4b3acf47f846a;hpb=536ea4e4198eeaa5a73fb734ea675e621609bb7e;p=powerpc.git

diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 3628899f48..0ddcdba79e 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -12,7 +12,9 @@
  *    Copyright (C) 1992, Linus Torvalds
  *
  */
-#include
+
+#ifdef __KERNEL__
+
 #include
 
 /*
@@ -51,19 +53,6 @@
  * with operation of the form "set_bit(bitnr, flags)".
  */
 
-/* set ALIGN_CS to 1 if the SMP safe bit operations should
- * align the address to 4 byte boundary. It seems to work
- * without the alignment.
- */
-#ifdef __KERNEL__
-#define ALIGN_CS 0
-#else
-#define ALIGN_CS 1
-#ifndef CONFIG_SMP
-#error "bitops won't work without CONFIG_SMP"
-#endif
-#endif
-
 /* bitmap tables from arch/S390/kernel/bitmap.S */
 extern const char _oi_bitmap[];
 extern const char _ni_bitmap[];
@@ -122,10 +111,6 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make OR mask */
@@ -142,10 +127,6 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make AND mask */
@@ -162,10 +143,6 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make XOR mask */
@@ -183,10 +160,6 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make OR/test mask */
@@ -206,10 +179,6 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make AND/test mask */
@@ -229,10 +198,6 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         unsigned long addr, old, new, mask;
 
         addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
-        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
-#endif
         /* calculate address for CS */
         addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
         /* make XOR/test mask */
@@ -828,37 +793,12 @@ static inline int sched_find_first_bit(unsigned long *b)
         return find_first_bit(b, 140);
 }
 
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-#define ffs(x) generic_ffs(x)
+#include
 
-/*
- * fls: find last bit set.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include
+#include
 
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) \
-({ \
-        unsigned long __x = (x); \
-        unsigned int __w; \
-        __w = generic_hweight32((unsigned int) __x); \
-        __w += generic_hweight32((unsigned int) (__x>>32)); \
-        __w; \
-})
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-
-#ifdef __KERNEL__
+#include
 
 /*
  * ATTENTION: intel byte ordering convention for ext2 and minix !!
@@ -871,11 +811,11 @@ static inline int sched_find_first_bit(unsigned long *b)
  */
 
 #define ext2_set_bit(nr, addr)       \
-        test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+        __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_set_bit_atomic(lock, nr, addr)       \
         test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_clear_bit(nr, addr)     \
-        test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+        __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_clear_bit_atomic(lock, nr, addr)     \
         test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_test_bit(nr, addr)      \
@@ -1011,18 +951,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
         return offset + ext2_find_first_zero_bit(p, size);
 }
 
-/* Bitmap functions for the minix filesystem. */
-/* FIXME !!! */
-#define minix_test_and_set_bit(nr,addr) \
-        test_and_set_bit(nr,(unsigned long *)addr)
-#define minix_set_bit(nr,addr) \
-        set_bit(nr,(unsigned long *)addr)
-#define minix_test_and_clear_bit(nr,addr) \
-        test_and_clear_bit(nr,(unsigned long *)addr)
-#define minix_test_bit(nr,addr) \
-        test_bit(nr,(unsigned long *)addr)
-#define minix_find_first_zero_bit(addr,size) \
-        find_first_zero_bit(addr,size)
+#include
 
 #endif /* __KERNEL__ */