5 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
6 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
10 #error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
15 #include <linux/compiler.h>
16 #include <linux/list.h>
17 #include <linux/spinlock.h>
/*
 * Out-of-line slow paths, defined elsewhere (not in this header).
 * Each is entered when the corresponding inline fast path below
 * detects contention; each returns the semaphore pointer.
 */
21 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
22 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
23 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
26 * the semaphore definition
/*
 * Layout of the 64-bit count word:
 *   low 32 bits  (RWSEM_ACTIVE_MASK) - number of active lockers
 *   high 32 bits                     - waiting bias (negative)
 * A reader adds ACTIVE_BIAS (+1); a writer adds WAITING_BIAS +
 * ACTIVE_BIAS, so the fast paths can detect contention from the
 * sign / non-zero-ness of the prior count alone.
 */
30 #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
31 #define RWSEM_ACTIVE_BIAS 0x0000000000000001L
32 #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
33 #define RWSEM_WAITING_BIAS (-0x0000000100000000L)
34 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
35 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/* Queue of waiting tasks; presumably protected by sem->wait_lock
 * (see init_rwsem below) - the rest of the struct is elided from
 * this view. */
37 struct list_head wait_list;
/* Extra initializer field used when rwsem debugging is enabled
 * (the #if/#else selecting between the two variants is outside this
 * view). */
44 #define __RWSEM_DEBUG_INIT , 0
/* Empty counterpart for non-debug builds. */
46 #define __RWSEM_DEBUG_INIT /* */
/* Static initializer: unlocked count, unlocked spinlock, empty wait
 * list, plus the optional debug field. */
49 #define __RWSEM_INITIALIZER(name) \
50 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
51 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
/* Define and statically initialize an rwsem in a single step. */
53 #define DECLARE_RWSEM(name) \
54 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initialization of an rwsem: count unlocked, wait_lock
 * initialized, wait list empty.  (Braces and any debug-field setup
 * are elided from this view.)
 */
56 static inline void init_rwsem(struct rw_semaphore *sem)
58 sem->count = RWSEM_UNLOCKED_VALUE;
59 spin_lock_init(&sem->wait_lock);
60 INIT_LIST_HEAD(&sem->wait_list);
/*
 * Fast path for acquiring the semaphore for reading: add the reader
 * bias to count.  A negative prior count means an active or waiting
 * writer, so fall into the out-of-line slow path.
 */
66 static inline void __down_read(struct rw_semaphore *sem)
/* Plain non-atomic update - presumably the !SMP branch; the guarding
 * #ifdef is outside this view. */
70 oldcount = sem->count;
71 sem->count += RWSEM_ACTIVE_READ_BIAS;
/* Tail of the SMP ll/sc asm sequence (body elided from this view);
 * oldcount receives the count prior to the increment. */
83 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
84 :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
/* Writer bias makes the count negative: contention, go slow path. */
86 if (__builtin_expect(oldcount < 0, 0))
87 rwsem_down_read_failed(sem);
91 * trylock for reading -- returns 1 if successful, 0 if contention
/*
 * cmpxchg loop: compute the new count with the reader bias added and
 * try to install it atomically, retrying while another CPU changed
 * count underneath us.  (Loop head and local declarations are elided
 * from this view.)
 */
93 static inline int __down_read_trylock(struct rw_semaphore *sem)
99 new = res + RWSEM_ACTIVE_READ_BIAS;
/* cmpxchg returns the prior value of count; equal to 'old' on
 * success. */
103 res = cmpxchg(&sem->count, old, new);
104 } while (res != old);
/* A negative count at the time of the exchange means a writer was
 * active/waiting: report failure. */
105 return res >= 0 ? 1 : 0;
/*
 * Fast path for acquiring the semaphore for writing: add the full
 * write bias.  Success requires the prior count to have been exactly
 * zero (no active or waiting lockers).
 */
108 static inline void __down_write(struct rw_semaphore *sem)
/* Plain non-atomic update - presumably the !SMP branch; the guarding
 * #ifdef is outside this view. */
112 oldcount = sem->count;
113 sem->count += RWSEM_ACTIVE_WRITE_BIAS;
/* SMP path: ll/sc asm (body elided from this view); oldcount
 * receives the count prior to the addition. */
116 __asm__ __volatile__(
125 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
126 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
/* Any non-zero prior count means contention: take the slow path. */
128 if (__builtin_expect(oldcount, 0))
129 rwsem_down_write_failed(sem);
133 * trylock for writing -- returns 1 if successful, 0 if contention
/*
 * Single atomic attempt: swap the count from "unlocked" straight to
 * the write-locked value; any other current value means contention.
 */
135 static inline int __down_write_trylock(struct rw_semaphore *sem)
137 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
138 RWSEM_ACTIVE_WRITE_BIAS);
/* cmpxchg returns the prior count; it equals RWSEM_UNLOCKED_VALUE
 * only on success (the return statements are elided from this
 * view). */
139 if (ret == RWSEM_UNLOCKED_VALUE)
/*
 * Release a read lock: subtract the reader bias.  If waiters are
 * queued (count negative) and we were the last active locker, the
 * wakeup slow path must run.
 */
144 static inline void __up_read(struct rw_semaphore *sem)
/* Plain non-atomic update - presumably the !SMP branch; the guarding
 * #ifdef is outside this view. */
148 oldcount = sem->count;
149 sem->count -= RWSEM_ACTIVE_READ_BIAS;
/* SMP path: ll/sc asm (body elided from this view); oldcount
 * receives the count prior to the subtraction. */
152 __asm__ __volatile__(
161 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
162 :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
/* Negative prior count => waiters present; if the active count (low
 * 32 bits) just dropped to zero, wake them (the rwsem_wake() call
 * itself is elided from this view). */
164 if (__builtin_expect(oldcount < 0, 0))
165 if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
/*
 * Release the write lock: subtract the full write bias.  A non-zero
 * remaining count indicates queued waiters (the follow-up wakeup
 * check/call is elided from this view).
 */
169 static inline void __up_write(struct rw_semaphore *sem)
/* Plain non-atomic update - presumably the !SMP branch; the guarding
 * #ifdef is outside this view. */
173 sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
/* SMP path: ll/sc asm (body elided from this view). */
177 __asm__ __volatile__(
187 :"=&r" (count), "=m" (sem->count), "=&r" (temp)
188 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
/* NOTE(review): 'count' is presumably the post-subtraction value -
 * non-zero meaning waiters remain; confirm against the elided asm
 * body before relying on this. */
190 if (__builtin_expect(count, 0))
/*
 * Atomically add 'val' to sem->count, discarding the result
 * (ll/sc asm body elided from this view; 'temp' is the sc scratch
 * register).
 */
195 static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
201 __asm__ __volatile__(
209 :"=&r" (temp), "=m" (sem->count)
210 :"Ir" (val), "m" (sem->count));
/*
 * Atomically add 'val' to sem->count and yield the updated value in
 * 'ret', which is presumably returned by the elided tail of the
 * function (ll/sc asm body and return statement not visible in this
 * view).
 */
214 static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
221 __asm__ __volatile__(
230 :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
231 :"Ir" (val), "m" (sem->count));
237 #endif /* __KERNEL__ */
238 #endif /* _ALPHA_RWSEM_H */