/*
 *  linux/arch/s390/kernel/semaphore.c
 *
 *    Copyright (C) 1998-2000 IBM Corporation
 *    Author(s): Martin Schwidefsky
 *
 *  Derived from "linux/arch/i386/kernel/semaphore.c"
 *    Copyright (C) 1999, Linus Torvalds
 */
#include <linux/sched.h>

#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquirers.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
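
/*
 * A concrete trace may help (illustrative only, values assumed here):
 * take a semaphore initialized with count = 1 and sleepers = 0, and
 * two tasks A and B.
 *
 *   A: down()   count 1 -> 0, fast path succeeds, A holds the semaphore
 *   B: down()   count 0 -> -1, negative, so B enters __down():
 *               sleepers becomes 1, adding (sleepers - 1) = 0 leaves
 *               count negative, so B sets sleepers = 1 and sleeps
 *   A: up()     count -1 -> 0, count was negative, so __up() wakes B
 *   B: retries  adding (sleepers - 1) = 0 leaves count at 0, which is
 *               not negative, so B clears sleepers and holds the semaphore
 */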
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
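
/*
 * Note that a single global lock, rather than one lock per semaphore,
 * protects the sleepers bookkeeping below: it is taken only in the
 * already-contended slow paths, so it never appears on the fast path.
 */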
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
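
/*
 * For reference, a typical caller of the uninterruptible primitives
 * that end up in __down() looks like the sketch below ("my_sem" is
 * just an example name, not something defined in this file):
 *
 *	static DECLARE_MUTEX(my_sem);
 *
 *	down(&my_sem);
 *	...critical section...
 *	up(&my_sem);
 *
 * down() only falls into __down() above when decrementing count takes
 * it negative, i.e. when somebody else already holds the semaphore.
 */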
int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;
		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
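
/*
 * The interruptible variant above reports -EINTR when a pending signal
 * aborts the wait. A caller commonly checks the return value along the
 * lines of this sketch (again with a made-up "my_sem"):
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;
 *	...critical section...
 *	up(&my_sem);
 */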
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;
	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;
	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);
	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
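
/*
 * __down_trylock() above is only reached once the inline down_trylock()
 * has already taken count below zero; returning 1 reports that the
 * semaphore could not be acquired, after the count has been corrected.
 * A caller sketch (hypothetical "my_sem" and error code again):
 *
 *	if (down_trylock(&my_sem))
 *		return -EBUSY;
 *	...critical section...
 *	up(&my_sem);
 */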