1 /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
3 * Written by David Howells (dhowells@redhat.com).
5 * Derived from asm-i386/semaphore.h
7 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
10 * The MSW of the count is the negated number of active writers and waiting
11 * lockers, and the LSW is the total number of active locks
13 * The lock count is initialized to 0 (no active and no waiting lockers).
15 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
16 * uncontended lock. This can be determined because XADD returns the old value.
17 * Readers increment by 1 and see a positive value when uncontended, negative
18 * if there are writers (and maybe) readers waiting (in which case it goes to
21 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
22 * be extended to 65534 by manually checking the whole MSW rather than relying
25 * The value of ACTIVE_BIAS supports up to 65535 active processes.
27 * This should be totally fair - if anything is waiting, a process that wants a
28 * lock will go to the back of the queue. When the currently active lock is
29 * released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consecutive readers at the
31 * front, then they'll all be woken up, but no other readers will be.
37 #ifndef _LINUX_RWSEM_H
38 #error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
43 #include <linux/list.h>
44 #include <linux/spinlock.h>
/*
 * Out-of-line slow paths, entered from the inline fast paths below when the
 * atomic count update shows contention.  FASTCALL is presumably the i386
 * register-calling-convention wrapper — defined elsewhere; not visible here.
 */
extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
/*
 * the semaphore definition
 *
 * Count encoding (per the header comment above): LSW = number of active
 * lock holders, MSW = negated number of active writers + waiting lockers.
 */
#define RWSEM_UNLOCKED_VALUE 0x00000000	/* no active, no waiting lockers */
#define RWSEM_ACTIVE_BIAS 0x00000001	/* one active holder (LSW += 1) */
#define RWSEM_ACTIVE_MASK 0x0000ffff	/* mask selecting the active-count LSW */
#define RWSEM_WAITING_BIAS (-0x00010000)	/* one waiting locker (MSW -= 1) */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
/* a writer is both "active" and accounted for in the waiting/writer MSW */
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
64 struct list_head wait_list;
/* NOTE(review): these are the two arms of an #if RWSEM_DEBUG / #else choice;
 * the surrounding conditional lines are missing from this excerpt, so both
 * definitions appear back-to-back here. */
#define __RWSEM_DEBUG_INIT , 0
#define __RWSEM_DEBUG_INIT /* */
/* Static initializer: count unlocked, wait_lock released, empty wait queue.
 * NOTE(review): the macro's closing line (presumably "__RWSEM_DEBUG_INIT }")
 * is missing from this excerpt — the trailing backslash continues onto it. */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
/* Declare and statically initialize a rw_semaphore in one step. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initializer — same end state as __RWSEM_INITIALIZER, for
 * semaphores that cannot be statically initialized.
 * NOTE(review): the function's opening and closing braces are missing from
 * this excerpt.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
	sem->count = RWSEM_UNLOCKED_VALUE;	/* no active or waiting lockers */
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);	/* empty waiter queue */
/*
 * Lock for reading: atomically increment sem->count (the eax register
 * presumably holds sem — the constraint list is not visible here).  A
 * negative result means a writer is active or waiting, so the out-of-line
 * section calls rwsem_down_read_failed() to queue and sleep.
 * NOTE(review): the asm labels (1:/2:), register saves around the call,
 * LOCK_SECTION_END, the constraint lists and the closing braces are all
 * missing from this excerpt.
 */
static inline void __down_read(struct rw_semaphore *sem)
	__asm__ __volatile__(
		"# beginning down_read\n\t"
LOCK_PREFIX	" incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
		" js 2f\n\t" /* jump if we weren't granted the lock */
		LOCK_SECTION_START("")
		" call rwsem_down_read_failed\n\t"
		"# ending down_read\n\t"
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Uses CMPXCHG to add ACTIVE_READ_BIAS only if that cannot make the count
 * negative; result holds the count value sampled before the attempt.
 * NOTE(review): the declarations of result/tmp, the retry-loop labels and
 * the clobber list are missing from this excerpt.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
	__asm__ __volatile__(
		"# beginning __down_read_trylock\n\t"
LOCK_PREFIX	" cmpxchgl %2,%0\n\t"
		"# ending __down_read_trylock\n\t"
		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
		: "i"(RWSEM_ACTIVE_READ_BIAS)
	/* non-negative pre-attempt count == no writer active/waiting */
	return result>=0 ? 1 : 0;
/*
 * Lock for writing: XADD the write bias (-0x0000ffff) into sem->count.
 * XADD leaves the OLD count in tmp; only if it was 0 (completely unlocked)
 * was the write lock granted, otherwise the out-of-line section calls
 * rwsem_down_write_failed() to queue and sleep.
 * NOTE(review): the opening brace, tmp's declaration, the asm labels,
 * LOCK_SECTION_END, the input/clobber lists and the closing lines are
 * missing from this excerpt.
 */
static inline void __down_write(struct rw_semaphore *sem)
	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		"# beginning down_write\n\t"
LOCK_PREFIX	" xadd %0,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
		" testl %0,%0\n\t" /* was the count 0 before? */
		" jnz 2f\n\t" /* jump if we weren't granted the lock */
		LOCK_SECTION_START("")
		" call rwsem_down_write_failed\n\t"
		"# ending down_write"
		: "+d"(tmp), "+m"(sem->count)
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Single CMPXCHG: succeed only on the fully-unlocked -> write-locked
 * transition (0 -> ACTIVE_WRITE_BIAS); cmpxchg() returns the prior count.
 * NOTE(review): the braces and the "return 1;" / "return 0;" statements
 * are missing from this excerpt.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
	signed long ret = cmpxchg(&sem->count,
				  RWSEM_UNLOCKED_VALUE,
				  RWSEM_ACTIVE_WRITE_BIAS);
	if (ret == RWSEM_UNLOCKED_VALUE)
/*
 * unlock after reading
 *
 * XADD -1 into sem->count; tmp receives the OLD count.  A negative old
 * count means lockers are waiting: the slow section then checks (via the
 * decw on the old LSW) whether we were the last active reader and, only
 * then, calls rwsem_wake() to wake the queue head.
 * NOTE(review): the braces, asm labels, register saves around the call,
 * LOCK_SECTION_END and the input/clobber lists are missing from this
 * excerpt.
 */
static inline void __up_read(struct rw_semaphore *sem)
	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
	__asm__ __volatile__(
		"# beginning __up_read\n\t"
LOCK_PREFIX	" xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
		" js 2f\n\t" /* jump if the lock is being waited upon */
		LOCK_SECTION_START("")
		" decw %%dx\n\t" /* do nothing if still outstanding active readers */
		" call rwsem_wake\n\t"
		"# ending __up_read\n"
		: "+m"(sem->count), "+d"(tmp)
/*
 * unlock after writing
 *
 * XADD -ACTIVE_WRITE_BIAS into sem->count, attempting the
 * 0xffff0001 -> 0x00000000 transition; a non-zero result means lockers
 * are queued, in which case the slow section checks the active count and
 * calls rwsem_wake() when it has dropped to zero.
 * NOTE(review): the braces, asm labels, register saves, LOCK_SECTION_END
 * and the output-constraint line (the "+m"(sem->count) section that should
 * precede the inputs on the ':' lines below) are missing from this excerpt.
 */
static inline void __up_write(struct rw_semaphore *sem)
	__asm__ __volatile__(
		"# beginning __up_write\n\t"
LOCK_PREFIX	" xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
		" jnz 2f\n\t" /* jump if the lock is being waited upon */
		LOCK_SECTION_START("")
		" decw %%dx\n\t" /* did the active count reduce to 0? */
		" jnz 1b\n\t" /* jump back if not */
		" call rwsem_wake\n\t"
		"# ending __up_write\n"
		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
		: "memory", "cc", "edx");
/*
 * implement atomic add functionality
 *
 * Locked add of delta into sem->count; no return value, no wakeups.
 * NOTE(review): the braces and the asm output-constraint line (which
 * should precede the input line below) are missing from this excerpt.
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"ir"(delta), "m"(sem->count));
/*
 * implement exchange and add functionality
 *
 * Locked XADD of delta into sem->count; tmp (presumably initialized to
 * delta on a missing line) receives the old count, and the function
 * presumably returns the updated value — the declaration, the asm's
 * closing ");" and the return statement are all missing from this excerpt.
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
	__asm__ __volatile__(
		LOCK_PREFIX "xadd %0,(%2)"
		: "+r"(tmp), "=m"(sem->count)
		: "r"(sem), "m"(sem->count)
263 #endif /* __KERNEL__ */
264 #endif /* _I386_RWSEM_H */