/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 Linus Torvalds
 * Copyright (C) 1998, 99, 2000, 01 Ralf Baechle
 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 */
11 #ifndef _ASM_SEMAPHORE_H
12 #define _ASM_SEMAPHORE_H
14 #include <linux/compiler.h>
15 #include <linux/config.h>
16 #include <linux/spinlock.h>
17 #include <linux/wait.h>
18 #include <linux/rwsem.h>
19 #include <asm/atomic.h>
29 wait_queue_head_t wait;
33 } __attribute__((aligned(8)));
36 # define __SEM_DEBUG_INIT(name) , .__magic = (long)&(name).__magic
38 # define __SEM_DEBUG_INIT(name)
41 #define __SEMAPHORE_INITIALIZER(name,_count) { \
42 .count = ATOMIC_INIT(_count), \
43 .waking = ATOMIC_INIT(0), \
44 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
45 __SEM_DEBUG_INIT(name) \
48 #define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name, 1)
50 #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
51 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
53 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
54 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
56 static inline void sema_init (struct semaphore *sem, int val)
58 atomic_set(&sem->count, val);
59 atomic_set(&sem->waking, 0);
60 init_waitqueue_head(&sem->wait);
62 sem->__magic = (long)&sem->__magic;
66 static inline void init_MUTEX (struct semaphore *sem)
71 static inline void init_MUTEX_LOCKED (struct semaphore *sem)
76 #ifndef CONFIG_CPU_HAS_LLDSCD
78 * On machines without lld/scd we need a spinlock to make the manipulation of
79 * sem->count and sem->waking atomic.
81 extern spinlock_t semaphore_lock;
84 extern void __down_failed(struct semaphore * sem);
85 extern int __down_failed_interruptible(struct semaphore * sem);
86 extern void __up_wakeup(struct semaphore * sem);
88 static inline void down(struct semaphore * sem)
93 CHECK_MAGIC(sem->__magic);
95 count = atomic_dec_return(&sem->count);
96 if (unlikely(count < 0))
101 * Interruptible try to acquire a semaphore. If we obtained
102 * it, return zero. If we were interrupted, returns -EINTR
104 static inline int down_interruptible(struct semaphore * sem)
109 CHECK_MAGIC(sem->__magic);
111 count = atomic_dec_return(&sem->count);
112 if (unlikely(count < 0))
113 return __down_failed_interruptible(sem);
118 #ifdef CONFIG_CPU_HAS_LLDSCD
121 * down_trylock returns 0 on success, 1 if we failed to get the lock.
123 * We must manipulate count and waking simultaneously and atomically.
124 * Here, we do this by using lld/scd on the pair of 32-bit words.
128 * Decrement(sem->count)
129 * If(sem->count >=0) {
130 * Return(SUCCESS) // resource is free
132 * If(sem->waking <= 0) { // if no wakeup pending
133 * Increment(sem->count) // undo decrement
136 * Decrement(sem->waking) // otherwise "steal" wakeup
141 static inline int down_trylock(struct semaphore * sem)
143 long ret, tmp, tmp2, sub;
146 CHECK_MAGIC(sem->__magic);
149 __asm__ __volatile__(
150 " .set mips3 # down_trylock \n"
152 " dli %3, 0x0000000100000000 # count -= 1 \n"
154 " li %0, 0 # ret = 0 \n"
155 " bgez %1, 2f # if count >= 0 \n"
156 " sll %2, %1, 0 # extract waking \n"
157 " blez %2, 1f # if waking < 0 -> 1f \n"
158 " daddiu %1, %1, -1 # waking -= 1 \n"
160 "1: daddu %1, %1, %3 # count += 1 \n"
161 " li %0, 1 # ret = 1 \n"
166 : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
174 * Note! This is subtle. We jump to wake people up only if
175 * the semaphore was negative (== somebody was waiting on it).
177 static inline void up(struct semaphore * sem)
179 unsigned long tmp, tmp2;
183 CHECK_MAGIC(sem->__magic);
186 * We must manipulate count and waking simultaneously and atomically.
187 * Otherwise we have races between up and __down_failed_interruptible
188 * waking up on a signal.
191 __asm__ __volatile__(
195 " dsra32 %0, %1, 0 # extract count to %0 \n"
196 " daddiu %0, 1 # count += 1 \n"
197 " slti %2, %0, 1 # %3 = (%0 <= 0) \n"
198 " daddu %1, %2 # waking += %3 \n"
199 " dsll32 %1, %1, 0 # zero-extend %1 \n"
200 " dsrl32 %1, %1, 0 \n"
201 " dsll32 %2, %0, 0 # Reassemble union \n"
202 " or %1, %2 # from count and waking \n"
206 : "=&r"(count), "=&r"(tmp), "=&r"(tmp2), "+m"(*sem)
210 if (unlikely(count <= 0))
217 * Non-blockingly attempt to down() a semaphore.
218 * Returns zero if we acquired it
220 static inline int down_trylock(struct semaphore * sem)
227 CHECK_MAGIC(sem->__magic);
230 spin_lock_irqsave(&semaphore_lock, flags);
231 count = atomic_read(&sem->count) - 1;
232 atomic_set(&sem->count, count);
233 if (unlikely(count < 0)) {
234 waking = atomic_read(&sem->waking);
236 atomic_set(&sem->count, count + 1);
239 atomic_set(&sem->waking, waking - 1);
243 spin_unlock_irqrestore(&semaphore_lock, flags);
249 * Note! This is subtle. We jump to wake people up only if
250 * the semaphore was negative (== somebody was waiting on it).
252 static inline void up(struct semaphore * sem)
258 CHECK_MAGIC(sem->__magic);
261 * We must manipulate count and waking simultaneously and atomically.
262 * Otherwise we have races between up and __down_failed_interruptible
263 * waking up on a signal.
266 spin_lock_irqsave(&semaphore_lock, flags);
267 count = atomic_read(&sem->count) + 1;
268 waking = atomic_read(&sem->waking);
271 atomic_set(&sem->count, count);
272 atomic_set(&sem->waking, waking);
273 spin_unlock_irqrestore(&semaphore_lock, flags);
275 if (unlikely(count <= 0))
279 #endif /* CONFIG_CPU_HAS_LLDSCD */
281 static inline int sem_getcount(struct semaphore *sem)
283 return atomic_read(&sem->count);
286 #endif /* _ASM_SEMAPHORE_H */