/*
 * IA-64 semaphore implementation (derived from x86 version).
 *
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Semaphores are implemented using a two-way counter: The "count"
 * variable is decremented for each process that tries to acquire the
 * semaphore, while the "sleepers" variable is a count of such
 * acquirers.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently
 * test if they need to do any extra work (up needs to do something
 * only if count was negative before the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected by the
 * semaphore spinlock.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */

#include <linux/sched.h>

#include <asm/semaphore.h>
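
/*
 * Illustrative sketch (not part of the original file): the inline fast
 * paths in <asm/semaphore.h> reach the functions below only on
 * contention.  A typical caller looks roughly like this; the example
 * is guarded by #if 0 so it is never compiled.
 */
#if 0
static void example_usage (struct semaphore *sem)
{
	down(sem);	/* sleeps in __down() if "count" went negative */
	/* ... critical section ... */
	up(sem);	/* invokes __up() only when waiters may exist */
}
#endif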

/*
 * Logic:
 *  - Only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - When we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
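
/*
 * Concrete example (annotation added here, not in the original): with
 * count == 1, a first down() takes count to 0 and never enters this
 * file; a second down() takes count to -1, and that boundary crossing
 * is what sends the second task into __down() below.
 */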

void
__up (struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void
__down (struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

62 * Add "everybody else" into it. They aren't
63 * playing, because we own the spinlock.
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
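
/*
 * Worked example (annotation added here, not in the original): a lone
 * sleeper loops with sem->sleepers == 1, so atomic_add_negative(0,
 * &sem->count) merely tests the sign of count.  Once an up() has made
 * count non-negative, the sleeper owns the semaphore and zeroes
 * sleepers; otherwise it records itself (sleepers = 1) and sleeps on.
 */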

int
__down_interruptible (struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

111 * Add "everybody else" into it. They aren't
112 * playing, because we own the spinlock. The
113 * "-1" is because we're still hoping to get
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
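
/*
 * Illustrative sketch (not part of the original file): callers of the
 * interruptible variant must cope with the -EINTR return, roughly:
 */
#if 0
static int example_interruptible (struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -EINTR;	/* a signal arrived while sleeping */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif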

/*
 * Trylock failed - make sure we correct for having decremented the
 * count.
 */
int
__down_trylock (struct semaphore *sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

149 * Add "everybody else" and us into it. They aren't
150 * playing, because we own the spinlock.
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
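
/*
 * Added note: the unconditional "return 1" above reports failure to
 * the down_trylock() fast path, which has already decremented "count";
 * this function only undoes that decrement (folding in any recorded
 * sleepers) before reporting that the semaphore was busy.
 */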