1 #ifndef __ARCH_X86_64_ATOMIC__
2 #define __ARCH_X86_64_ATOMIC__
4 #include <linux/config.h>
6 /* atomic_t should be 32 bit signed type */
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */
/*
 * "lock ;" prefix string prepended to read-modify-write instructions below,
 * making them atomic with respect to other CPUs.
 * NOTE(review): in the full file this is presumably defined conditionally
 * (empty on !CONFIG_SMP, cf. the <linux/config.h> include) — confirm.
 */
#define LOCK "lock ; "
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
/* Static initializer for an atomic_t, e.g.: static atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i) { (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  The counter is a plain 32-bit
 * int, so the full 32-bit range is usable (the old "only 24 bits"
 * caveat was inherited from other architectures and does not apply
 * to x86-64).
 */
#define atomic_read(v) ((v)->counter)
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  A naturally aligned 32-bit
 * store is atomic on x86-64, so no lock prefix is needed.
 */
#define atomic_set(v,i) (((v)->counter) = (i))
48 * atomic_add - add integer to atomic variable
49 * @i: integer value to add
50 * @v: pointer of type atomic_t
52 * Atomically adds @i to @v. Note that the guaranteed useful range
53 * of an atomic_t is only 24 bits.
55 static __inline__ void atomic_add(int i, atomic_t *v)
60 :"ir" (i), "m" (v->counter));
64 * atomic_sub - subtract the atomic variable
65 * @i: integer value to subtract
66 * @v: pointer of type atomic_t
68 * Atomically subtracts @i from @v. Note that the guaranteed
69 * useful range of an atomic_t is only 24 bits.
71 static __inline__ void atomic_sub(int i, atomic_t *v)
76 :"ir" (i), "m" (v->counter));
80 * atomic_sub_and_test - subtract value from variable and test result
81 * @i: integer value to subtract
82 * @v: pointer of type atomic_t
84 * Atomically subtracts @i from @v and returns
85 * true if the result is zero, or false for all
86 * other cases. Note that the guaranteed
87 * useful range of an atomic_t is only 24 bits.
89 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
94 LOCK "subl %2,%0; sete %1"
95 :"=m" (v->counter), "=qm" (c)
96 :"ir" (i), "m" (v->counter) : "memory");
101 * atomic_inc - increment atomic variable
102 * @v: pointer of type atomic_t
104 * Atomically increments @v by 1. Note that the guaranteed
105 * useful range of an atomic_t is only 24 bits.
107 static __inline__ void atomic_inc(atomic_t *v)
109 __asm__ __volatile__(
116 * atomic_dec - decrement atomic variable
117 * @v: pointer of type atomic_t
119 * Atomically decrements @v by 1. Note that the guaranteed
120 * useful range of an atomic_t is only 24 bits.
122 static __inline__ void atomic_dec(atomic_t *v)
124 __asm__ __volatile__(
131 * atomic_dec_and_test - decrement and test
132 * @v: pointer of type atomic_t
134 * Atomically decrements @v by 1 and
135 * returns true if the result is 0, or false for all other
136 * cases. Note that the guaranteed
137 * useful range of an atomic_t is only 24 bits.
139 static __inline__ int atomic_dec_and_test(atomic_t *v)
143 __asm__ __volatile__(
144 LOCK "decl %0; sete %1"
145 :"=m" (v->counter), "=qm" (c)
146 :"m" (v->counter) : "memory");
151 * atomic_inc_and_test - increment and test
152 * @v: pointer of type atomic_t
154 * Atomically increments @v by 1
155 * and returns true if the result is zero, or false for all
156 * other cases. Note that the guaranteed
157 * useful range of an atomic_t is only 24 bits.
159 static __inline__ int atomic_inc_and_test(atomic_t *v)
163 __asm__ __volatile__(
164 LOCK "incl %0; sete %1"
165 :"=m" (v->counter), "=qm" (c)
166 :"m" (v->counter) : "memory");
171 * atomic_add_negative - add and test if negative
172 * @v: pointer of type atomic_t
173 * @i: integer value to add
175 * Atomically adds @i to @v and returns true
176 * if the result is negative, or false when
177 * result is greater than or equal to zero. Note that the guaranteed
178 * useful range of an atomic_t is only 24 bits.
180 static __inline__ int atomic_add_negative(int i, atomic_t *v)
184 __asm__ __volatile__(
185 LOCK "addl %2,%0; sets %1"
186 :"=m" (v->counter), "=qm" (c)
187 :"ir" (i), "m" (v->counter) : "memory");
/* These are x86-specific, used by some header files */
/*
 * atomic_clear_mask - atomically AND *addr with ~mask, clearing the bits
 * set in @mask.  @mask must fit in a register ("r" constraint); the
 * "memory" clobber orders the operation against surrounding accesses.
 */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
/*
 * atomic_set_mask - atomically OR *addr with mask, setting the bits
 * set in @mask.  Counterpart to atomic_clear_mask above.
 */
#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*addr) : "memory")
/* Atomic operations are already serializing on x86 */
/* ...so these only need to stop the compiler from reordering (barrier()),
 * not emit any fence instruction. */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()