/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */

/* sparc32 semaphore implementation, based on i386 version */

#include <linux/sched.h>

#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the ordering of the contention routines
 * are protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>,
 * where we want to avoid any extra jumps and calls.
 */
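/*
 * For orientation, a rough C sketch of the fast path that lives in
 * <asm/semaphore.h>. This is illustrative only: the real sparc32
 * fast path is hand-written assembly, and only the shape of the
 * logic is shown here.
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);	(slow path: contention)
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);	(slow path: sleepers to wake)
 *	}
 */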
/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we are on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
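/*
 * Worked example of the bookkeeping above, assuming a semaphore
 * initialised to 1 (illustrative only):
 *
 *	A: down()  count 1 -> 0, fast path, A owns the semaphore.
 *	B: down()  count 0 -> -1, B enters __down(): sleepers -> 1,
 *	           adds sleepers-1 = 0 to count (still -1), sets
 *	           sleepers = 1 and sleeps.
 *	A: up()    count -1 -> 0, count was negative, so __up()
 *	           wakes B.
 *	B: retries adds sleepers-1 = 0 to count (0, non-negative),
 *	           sets sleepers = 0 and owns the semaphore.
 */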
/*
 * Called from up() only when "count" was negative, i.e. when
 * there may be sleepers to wake.
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
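/*
 * Illustrative caller pattern (a sketch, not part of this file;
 * "my_mutex" is a hypothetical name). DECLARE_MUTEX() defines a
 * semaphore with count 1:
 *
 *	static DECLARE_MUTEX(my_mutex);
 *
 *	down(&my_mutex);	(may sleep, uninterruptibly)
 *	... critical section ...
 *	up(&my_mutex);
 */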
int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into the
		 * trylock failure case - we won't be sleeping,
		 * and we can't get the lock as it has contention.
		 * Just correct the count and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't playing,
		 * because we own the spinlock. The "-1" is because
		 * we're still hoping to get the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
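/*
 * Illustrative caller pattern for the interruptible variant (a
 * sketch, not part of this file): the return value must be checked,
 * since a pending signal aborts the acquire with -EINTR.
 *
 *	if (down_interruptible(&my_mutex))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *	... critical section ...
 *	up(&my_mutex);
 */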
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
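/*
 * Illustrative caller pattern (a sketch, not part of this file):
 * down_trylock() returns 0 if the semaphore was acquired and
 * non-zero if taking it would have required sleeping.
 *
 *	if (down_trylock(&my_mutex))
 *		return -EBUSY;		(contended; we did not sleep)
 *	... critical section ...
 *	up(&my_mutex);
 */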