/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@redhat.com>
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative, we must
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
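/*
 * Illustrative sketch, not part of the kernel build: a single-threaded
 * user-space replay of the count/sleepers bookkeeping described above,
 * with the atomics and the spinlock elided.  Task names and the exact
 * interleaving are assumptions chosen for demonstration.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int count = 1;		/* free semaphore */
	int sleepers = 0;

	count--;		/* task A: fast-path down(), takes the lock */
	count--;		/* task B: fast-path down(), count < 0 -> __down() */
	sleepers++;		/* B enters the contention path */

	/* B folds "everybody else" into count; the -1 keeps B's own claim. */
	count += sleepers - 1;	/* still negative: B must sleep */
	sleepers = 1;		/* only B's claim stays recorded */
	printf("count=%d sleepers=%d (B sleeps)\n", count, sleepers);

	count++;		/* task A: up(); count was negative -> __up() wakes B */

	count += sleepers - 1;	/* B retries: non-negative, B owns the semaphore */
	sleepers = 0;
	printf("count=%d sleepers=%d (B owns it)\n", count, sleepers);
	return 0;
}
#endif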
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;
		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;
		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}
		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
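/*
 * For illustration only (not built): the single-"cmpxchg" trylock the
 * comment above alludes to.  A compare-and-exchange loop only ever
 * decrements a positive count, so the count never goes negative and no
 * fixup path is needed -- but cmpxchg first appeared on the i486, which
 * is why this file takes the decrement-and-fix-up route instead.  The
 * helper name is hypothetical.
 */
#if 0
static int cmpxchg_down_trylock(struct semaphore *sem)
{
	int old;

	do {
		old = atomic_read(&sem->count);
		if (old <= 0)
			return 1;	/* held: trylock fails */
	} while (cmpxchg(&sem->count.counter, old, old - 1) != old);
	return 0;			/* acquired */
}
#endif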
/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %ecx contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax when used as a return
 * value.
 */
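/*
 * For reference, a sketch (not built here) of roughly what the inline
 * down() in a 2.4-era <asm/semaphore.h> expands to: a single locked
 * decrement on the fast path, with the contention case moved out of
 * line and entered with the semaphore pointer already in %ecx, as
 * described above.  Details vary between kernel versions.
 */
#if 0
static inline void down(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK "decl %0\n\t"	/* --sem->count */
		"js 2f\n"		/* went negative: contention */
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tcall __down_failed\n\t"
		"jmp 1b\n"
		".previous"
		:"=m" (sem->count)
		:"c" (sem)
		:"memory");
}
#endif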
183 ".globl __down_failed\n"
185 #if defined(CONFIG_FRAME_POINTER)
196 #if defined(CONFIG_FRAME_POINTER)
206 ".globl __down_failed_interruptible\n"
207 "__down_failed_interruptible:\n\t"
208 #if defined(CONFIG_FRAME_POINTER)
214 "call __down_interruptible\n\t"
217 #if defined(CONFIG_FRAME_POINTER)
227 ".globl __down_failed_trylock\n"
228 "__down_failed_trylock:\n\t"
229 #if defined(CONFIG_FRAME_POINTER)
235 "call __down_trylock\n\t"
238 #if defined(CONFIG_FRAME_POINTER)
248 ".globl __up_wakeup\n"
/*
 * rw spinlock fallbacks
 */
#if defined(CONFIG_SMP)
267 ".globl __write_lock_failed\n"
268 "__write_lock_failed:\n\t"
269 LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
271 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
273 LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
274 "jnz __write_lock_failed\n\t"
281 ".globl __read_lock_failed\n"
282 "__read_lock_failed:\n\t"
287 LOCK "decl (%eax)\n\t"
288 "js __read_lock_failed\n\t"