diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 8578869a8b..62daa746a9 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
@@ -79,9 +80,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter += i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
 
@@ -124,9 +125,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter -= i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
 
@@ -173,11 +174,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -225,11 +226,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -293,12 +294,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -306,8 +307,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	return result;
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -454,9 +461,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter += i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
 
@@ -499,9 +506,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter -= i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
 
@@ -548,11 +555,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -600,11 +607,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -668,12 +675,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -681,6 +688,36 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 	return result;
 }
 
+#define atomic64_cmpxchg(v, o, n) \
+	(((__typeof__((v)->counter)))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 #define atomic64_inc_return(v) atomic64_add_return(1,(v))
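
The compare-and-swap retry loop that this patch introduces for atomic_add_unless() and atomic64_add_unless() is the standard lock-free pattern: snapshot the counter, bail out if it holds the forbidden value, otherwise attempt the swap and retry with whatever value a racing CPU left behind. The sketch below is a standalone userspace illustration of that same loop, not part of the patch: it substitutes GCC's __sync_val_compare_and_swap builtin for the kernel's cmpxchg() and a plain int for atomic_t, and the names add_unless/counter are hypothetical.

/*
 * Userspace sketch of the cmpxchg retry loop from the patch.
 * Build with: gcc -o add_unless add_unless.c
 */
#include <stdio.h>

static int add_unless(int *v, int a, int u)
{
	int c, old;

	c = *v;			/* snapshot the current value */
	for (;;) {
		if (c == u)	/* forbidden value: give up */
			break;
		/* try to swap c -> c + a; returns the value actually seen */
		old = __sync_val_compare_and_swap(v, c, c + a);
		if (old == c)	/* no one raced us: the add landed */
			break;
		c = old;	/* raced: retry against the new value */
	}
	return c != u;		/* non-zero iff the add happened */
}

int main(void)
{
	int counter = 1;

	/* the inc_not_zero pattern: succeeds only while counter != 0 */
	printf("%d (counter=%d)\n", add_unless(&counter, 1, 0), counter);
	counter = 0;
	printf("%d (counter=%d)\n", add_unless(&counter, 1, 0), counter);
	return 0;
}

Note how returning the previously seen value from the CAS lets the loop re-test the exit condition without an extra read, which is exactly why the patched kernel code keeps `c = old` rather than re-reading the counter.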