/*
 * IA-64 semaphore implementation (derived from x86 version).
 *
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Semaphores are implemented using a two-way counter: the "count"
 * variable is decremented for each process that tries to acquire the
 * semaphore, while the "sleepers" variable is a count of such
 * acquires.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently
 * test if they need to do any extra work (up needs to do something
 * only if count was negative before the increment operation).
 *
 * "sleepers" and the contention routine ordering are protected by the
 * semaphore spinlock.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */
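
/*
 * As a rough sketch (not the real <asm/semaphore.h> inlines, which are
 * architecture-specific), the fast path referenced above behaves like:
 *
 *  - down(): atomically decrement sem->count; only if the result is
 *    negative, fall into __down(sem) below.
 *  - up(): atomically increment sem->count; only if the old value was
 *    negative, call __up(sem) to wake a waiter.
 *
 * Everything in this file therefore runs only under contention.
 */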
#include <linux/sched.h>

#include <asm/semaphore.h>

/*
 * Logic:
 *  - Only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - When we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */

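/*
 * Illustrative example only (no extra logic is implied): with count
 * at 1, task A's down() takes count to 0 and proceeds, while task B's
 * down() takes count to -1 and enters __down(), where it bumps
 * "sleepers" and sleeps.  When A later calls up(), count goes from -1
 * to 0 (a negative to non-negative transition), so __up() runs and
 * wakes B, which then re-balances count against "sleepers" in the
 * loop below before it owns the semaphore.
 */
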
void
__up (struct semaphore *sem)
{
        wake_up(&sem->wait);
}

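/*
 * Note: this is a single, global lock; it serializes the "sleepers"
 * bookkeeping for every semaphore in the system and is only taken on
 * the contended slow path.
 */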
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void
__down (struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
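        /*
         * The fold above may have absorbed releases meant for other
         * waiters (count could now cover more than just us), so pass a
         * wakeup along and let the next exclusive waiter re-run its
         * loop; if nothing is free it simply goes back to sleep.
         */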
        wake_up(&sem->wait);
}

int
__down_interruptible (struct semaphore *sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        wake_up(&sem->wait);
        return retval;
}

/*
 * Trylock failed - make sure we correct for having decremented the
 * count.
 */
int
__down_trylock (struct semaphore *sem)
{
        unsigned long flags;
        int sleepers;

        spin_lock_irqsave(&semaphore_lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock.
         */
        if (!atomic_add_negative(sleepers, &sem->count))
                wake_up(&sem->wait);

        spin_unlock_irqrestore(&semaphore_lock, flags);
        return 1;
}
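/*
 * A note on the arithmetic (assuming the usual 2.4-style down_trylock()
 * inline, which decrements count and only calls this on a negative
 * result): adding "sleepers + 1" undoes our own decrement plus the one
 * left outstanding by the current sleeper, if any, just like the loops
 * above.  If that leaves count non-negative we may have absorbed a
 * wakeup, so pass it on.  Returning 1 tells the caller the trylock
 * failed.
 */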