From 2856f5e31c1413bf6e4f1371e07e17078a5fee5e Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Tue, 8 May 2007 00:34:38 -0700
Subject: [PATCH] atomic.h: atomic_add_unless as inline. Remove system.h
 atomic.h circular dependency

Make atomic_add_unless() an inline function and remove the circular
dependency between system.h and atomic.h.

I agree with Andi Kleen that this typeof is not needed and is more
error-prone.  All of the original atomic.h code that uses cmpxchg()
(which includes atomic_add_unless()) uses defines instead of inline
functions, probably to circumvent a circular dependency between
system.h and atomic.h on powerpc, which this patch addresses.  It
therefore makes sense to use inline functions, which provide type
checking.

Digging into the FRV architecture shows that it is affected by the same
circular dependency.  The FRV change is applied against, and on top of,
the rest of my atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-alpha/atomic.h     | 59 +++++++++++-----------
 include/asm-arm/atomic.h       |  1 +
 include/asm-arm26/atomic.h     |  1 -
 include/asm-frv/atomic.h       | 91 ++++++----------------------------
 include/asm-frv/system.h       | 70 +++++++++++++++++++++++++-
 include/asm-generic/atomic.h   | 17 +++++--
 include/asm-i386/atomic.h      | 29 +++++------
 include/asm-ia64/atomic.h      | 59 +++++++++++-----------
 include/asm-m32r/atomic.h      | 23 ++++++---
 include/asm-m68k/atomic.h      | 31 ++++++------
 include/asm-m68knommu/atomic.h | 25 ++++++----
 include/asm-mips/atomic.h      | 46 +++++++++++------
 include/asm-parisc/atomic.h    | 47 ++++++++++++------
 include/asm-powerpc/atomic.h   |  1 +
 include/asm-ppc/system.h       |  1 -
 include/asm-sparc64/atomic.h   | 59 +++++++++++-----------
 include/asm-x86_64/atomic.h    | 59 +++++++++++-----------
 include/asm-xtensa/atomic.h    | 23 ++++++---
 18 files changed, 360 insertions(+), 282 deletions(-)

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 7b4fba88cb..f5cb7b878a 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -2,6 +2,7 @@
 #define _ALPHA_ATOMIC_H
 
 #include
+#include
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-	__typeof__((v)->counter) c, old; \
-	c = atomic_read(v); \
-	for (;;) { \
-		if (unlikely(c == (u))) \
-			break; \
-		old = atomic_cmpxchg((v), c, c + (a)); \
-		if (likely(old == c)) \
-			break; \
-		c = old; \
-	} \
-	c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
*/ -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic64_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic64_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h index f266c27951..3b59f94b5a 100644 --- a/include/asm-arm/atomic.h +++ b/include/asm-arm/atomic.h @@ -12,6 +12,7 @@ #define __ASM_ARM_ATOMIC_H #include +#include typedef struct { volatile int counter; } atomic_t; diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h index 97e944fe1c..d6dd42374c 100644 --- a/include/asm-arm26/atomic.h +++ b/include/asm-arm26/atomic.h @@ -20,7 +20,6 @@ #ifndef __ASM_ARM_ATOMIC_H #define __ASM_ARM_ATOMIC_H - #ifdef CONFIG_SMP #error SMP is NOT supported #endif diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index 066386ac23..d425d8d0ad 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -16,6 +16,7 @@ #include #include +#include #ifdef CONFIG_SMP #error not SMP safe @@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); #define tas(ptr) (xchg((ptr), 1)) -/*****************************************************************************/ -/* - * compare and conditionally exchange value with memory - * - if (*ptr == test) then orig = *ptr; *ptr = test; - * - if (*ptr != test) then orig = *ptr; - */ -#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS - -#define cmpxchg(ptr, test, new) \ -({ \ - __typeof__(ptr) __xg_ptr = (ptr); \ - __typeof__(*(ptr)) __xg_orig, __xg_tmp; \ - __typeof__(*(ptr)) __xg_test = (test); \ - __typeof__(*(ptr)) __xg_new = (new); \ - \ - switch (sizeof(__xg_orig)) { \ - case 4: \ - asm volatile( \ - "0: \n" \ - " orcc gr0,gr0,gr0,icc3 \n" \ - " ckeq icc3,cc7 \n" \ - " ld.p %M0,%1 \n" \ - " orcr cc7,cc7,cc3 \n" \ - " sub%I4cc %1,%4,%2,icc0 \n" \ - " bne icc0,#0,1f \n" \ - " cst.p %3,%M0 ,cc3,#1 \n" \ - " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ - " beq icc3,#0,0b \n" \ - "1: \n" \ - : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \ - : "r"(__xg_new), "NPr"(__xg_test) \ - : "memory", "cc7", "cc3", "icc3", "icc0" \ - ); \ - break; \ - \ - default: \ - __xg_orig = 0; \ - asm volatile("break"); \ - break; \ - } \ - \ - __xg_orig; \ -}) - -#else - -extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); - -#define cmpxchg(ptr, test, new) \ -({ \ - __typeof__(ptr) __xg_ptr = (ptr); \ - __typeof__(*(ptr)) __xg_orig; \ - __typeof__(*(ptr)) __xg_test = (test); \ - __typeof__(*(ptr)) __xg_new = (new); \ - \ - switch (sizeof(__xg_orig)) { \ - case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \ - default: \ - __xg_orig = 0; \ - asm volatile("break"); \ - break; \ - } \ - \ - __xg_orig; \ -}) - -#endif - #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ 
-}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h index 1166899317..be303b3eef 100644 --- a/include/asm-frv/system.h +++ b/include/asm-frv/system.h @@ -13,7 +13,6 @@ #define _ASM_SYSTEM_H #include -#include struct thread_struct; @@ -197,4 +196,73 @@ extern void free_initmem(void); #define arch_align_stack(x) (x) +/*****************************************************************************/ +/* + * compare and conditionally exchange value with memory + * - if (*ptr == test) then orig = *ptr; *ptr = test; + * - if (*ptr != test) then orig = *ptr; + */ +#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS + +#define cmpxchg(ptr, test, new) \ +({ \ + __typeof__(ptr) __xg_ptr = (ptr); \ + __typeof__(*(ptr)) __xg_orig, __xg_tmp; \ + __typeof__(*(ptr)) __xg_test = (test); \ + __typeof__(*(ptr)) __xg_new = (new); \ + \ + switch (sizeof(__xg_orig)) { \ + case 4: \ + asm volatile( \ + "0: \n" \ + " orcc gr0,gr0,gr0,icc3 \n" \ + " ckeq icc3,cc7 \n" \ + " ld.p %M0,%1 \n" \ + " orcr cc7,cc7,cc3 \n" \ + " sub%I4cc %1,%4,%2,icc0 \n" \ + " bne icc0,#0,1f \n" \ + " cst.p %3,%M0 ,cc3,#1 \n" \ + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ + " beq icc3,#0,0b \n" \ + "1: \n" \ + : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \ + : "r"(__xg_new), "NPr"(__xg_test) \ + : "memory", "cc7", "cc3", "icc3", "icc0" \ + ); \ + break; \ + \ + default: \ + __xg_orig = 0; \ + asm volatile("break"); \ + break; \ + } \ + \ + __xg_orig; \ +}) + +#else + +extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); + +#define cmpxchg(ptr, test, new) \ +({ \ + __typeof__(ptr) __xg_ptr = (ptr); \ + __typeof__(*(ptr)) __xg_orig; \ + __typeof__(*(ptr)) __xg_test = (test); \ + __typeof__(*(ptr)) __xg_new = (new); \ + \ + switch (sizeof(__xg_orig)) { \ + case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \ + default: \ + __xg_orig = 0; \ + asm volatile("break"); \ + break; \ + } \ + \ + __xg_orig; \ +}) + +#endif + + #endif /* _ASM_SYSTEM_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 5ae6dce1cb..85fd0aa27a 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -9,7 +9,6 @@ */ #include -#include /* * Suppport for atomic_long_t @@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l) return (long)atomic64_dec_return(v); } -#define atomic_long_add_unless(l, a, u) \ - atomic64_add_unless((atomic64_t *)(l), (a), (u)) +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_unless(v, a, u); +} #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) @@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l) return (long)atomic_dec_return(v); } -#define atomic_long_add_unless(l, a, u) \ - atomic_add_unless((atomic_t *)(l), (a), (u)) +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_add_unless(v, a, u); +} #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index 0893511320..ff90c6e3fc 100644 --- 
a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_return(v) (atomic_add_return(1,v)) diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index b16ad235c7..1fc3b83325 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -15,6 +15,7 @@ #include #include +#include /* * On IA-64, counter must always be volatile to ensure that that the @@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) (cmpxchg(&((v)->counter), old, new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__(v->counter) c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__(v->counter) c, old; \ - c = atomic64_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic64_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic_add_return(i,v) \ diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h index f5a7d7301c..3a38ffe4a4 100644 --- a/include/asm-m32r/atomic.h +++ b/include/asm-m32r/atomic.h @@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index d5eed64cb8..4915294fea 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -2,7 +2,7 @@ #define __ARCH_M68K_ATOMIC__ -#include /* local_irq_XXX() */ +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); } -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /* Atomic operations are already serializing */ diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h index 6c4e4b63e4..d5632a305d 100644 --- a/include/asm-m68knommu/atomic.h +++ b/include/asm-m68knommu/atomic.h @@ -1,7 +1,7 @@ #ifndef __ARCH_M68KNOMMU_ATOMIC__ #define __ARCH_M68KNOMMU_ATOMIC__ -#include /* local_irq_XXX() */ +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_dec_return(v) atomic_sub_return(1,(v)) diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index 6423ffa195..62daa746a9 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -18,6 +18,7 @@ #include #include #include +#include typedef struct { volatile int counter; } atomic_t; @@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_dec_return(v) atomic_sub_return(1,(v)) @@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_dec_return(v) atomic64_sub_return(1,(v)) diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index 66a0edbb51..e894ee3507 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h @@ -6,6 +6,7 @@ #define _ASM_PARISC_ATOMIC_H_ #include +#include /* * Atomic operations that C can't guarantee us. Useful for @@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) @@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic64_read(v); \ - while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #endif /* CONFIG_64BIT */ diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 438a7fcfba..c44810b9d3 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t; #include #include #include +#include #define ATOMIC_INIT(i) { (i) } diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index 738943584c..56abe5e9e1 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -6,7 +6,6 @@ #include -#include #include /* diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index c3feb3af2c..3fb4e1f7f1 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -9,6 +9,7 @@ #define __ARCH_SPARC64_ATOMIC__ #include +#include typedef struct { volatile int counter; } atomic_t; typedef struct { volatile __s64 counter; } atomic64_t; @@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *); #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - likely(c != (u)); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic64_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic64_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - likely(c != (u)); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) /* Atomic operations are already serializing */ diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 80e4fdbe22..19e0c607b5 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -2,6 +2,7 @@ #define __ARCH_X86_64_ATOMIC__ #include +#include /* atomic_t should be 32 bit signed type */ @@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. 
* Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /** @@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic64_add_unless(v, a, u) \ -({ \ - __typeof__((v)->counter) c, old; \ - c = atomic64_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic64_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) /* These are x86-specific, used by some header files */ diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index 5c26720210..b3b23540f1 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -- 2.20.1
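The changelog's core argument is that a __typeof__-based statement-expression
macro gives weaker type checking than a real inline function.  The following
standalone userspace sketch illustrates that point; it is not kernel code, all
names (my_atomic_t, my_other_t, my_cmpxchg, my_read, my_add_unless_macro,
my_add_unless_inline) are made up for illustration, __sync_val_compare_and_swap()
merely stands in for the architecture's atomic_cmpxchg(), and the
likely()/unlikely() hints are left out.

#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_t;
typedef struct { volatile long counter; } my_other_t;	/* the "wrong" type */

/* Stand-ins for the kernel primitives, just for this illustration. */
#define my_cmpxchg(p, o, n)	__sync_val_compare_and_swap((p), (o), (n))
#define my_read(v)		((v)->counter)

/* Old style: __typeof__ accepts anything that has a counter field. */
#define my_add_unless_macro(v, a, u)					\
({									\
	__typeof__((v)->counter) c, old;				\
	c = my_read(v);							\
	while (c != (u) && (old = my_cmpxchg(&(v)->counter, c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})

/* New style: the prototype makes the compiler check the pointer type. */
static inline int my_add_unless_inline(my_atomic_t *v, int a, int u)
{
	int c, old;

	c = my_read(v);
	for (;;) {
		if (c == u)
			break;
		old = my_cmpxchg(&v->counter, c, c + a);
		if (old == c)
			break;
		c = old;
	}
	return c != u;
}

int main(void)
{
	my_atomic_t a = { .counter = 1 };
	my_other_t  b = { .counter = 1 };

	printf("%d\n", my_add_unless_macro(&a, 1, 0));	/* 1, a.counter is now 2 */
	printf("%d\n", my_add_unless_inline(&a, 1, 0));	/* 1, a.counter is now 3 */

	/* Accepted by the macro even though b is not a my_atomic_t: */
	printf("%d\n", (int)my_add_unless_macro(&b, 1, 0));

	/* The same misuse is a compile error with the inline version:
	 *	my_add_unless_inline(&b, 1, 0);
	 */
	return 0;
}

Compiled with gcc, the macro call on &b expands and runs even though b is not
the intended atomic type, while uncommenting the my_add_unless_inline(&b, ...)
line fails at compile time, which is exactly the type checking the changelog
argues for.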
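The "returns non-zero if @v was not @u" contract that every conversion above
preserves matters because of how atomic_inc_not_zero() is typically used: a
lockless lookup only takes a reference when the refcount has not already hit
zero.  Another small userspace sketch of that usage pattern, again with
hypothetical my_* names and the GCC builtin standing in for atomic_cmpxchg():

#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_t;

/* Add @a to @v unless it is @u; return non-zero if the add happened. */
static inline int my_add_unless(my_atomic_t *v, int a, int u)
{
	int c = v->counter, old;

	while (c != u && (old = __sync_val_compare_and_swap(&v->counter, c, c + a)) != c)
		c = old;
	return c != u;
}

/* Equivalent of atomic_inc_not_zero(): grab a reference only if still live. */
static inline int my_inc_not_zero(my_atomic_t *v)
{
	return my_add_unless(v, 1, 0);
}

int main(void)
{
	my_atomic_t live  = { .counter = 2 };	/* object still has users */
	my_atomic_t dying = { .counter = 0 };	/* last reference already gone */

	if (my_inc_not_zero(&live))
		printf("took a reference, count is now %d\n", live.counter);

	if (!my_inc_not_zero(&dying))
		printf("object is being freed, leave it alone\n");

	return 0;
}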