2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
8 * This implementation of synchronization variables is heavily based on
9 * one done by Steve Lord <lord@sgi.com>
11 * Paul Cassella <pwc@sgi.com>
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
19 #include <asm/semaphore.h>
20 #include <asm/hardirq.h>
21 #include <asm/softirq.h>
22 #include <asm/current.h>
24 #include <asm/sn/sv.h>
26 /* Define this to have sv_test() run some simple tests.
27 kernel_thread() must behave as expected when this is called. */
32 /* Set up some macros so sv_wait(), sv_signal(), and sv_broadcast()
33 can sanity check interrupt state on architectures where we know
36 #define SV_DEBUG_INTERRUPT_STATE
38 #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x1) != 0)
39 #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x1) == 0)
40 #define SV_INTERRUPT_TEST_WORKERS 31
42 #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x4000) != 0)
43 #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x4000) == 0)
44 #define SV_INTERRUPT_TEST_WORKERS 4 /* simulator's slow */
46 #undef SV_DEBUG_INTERRUPT_STATE
47 #define SV_INTERRUPT_TEST_WORKERS 4 /* reasonable? default. */
52 /* XXX FIXME hack hack hack. Our mips64 tree is from before the
53 switch to WQ_FLAG_EXCLUSIVE, and our ia64 tree is from after it. */
55 #undef EXCLUSIVE_IN_QUEUE
57 #define EXCLUSIVE_IN_QUEUE
58 #define TASK_EXCLUSIVE 0 /* for the set_current_state() in sv_wait() */
/* Acquire the sv's internal spinlock, which guards the sv's own state
   (distinct from the caller-supplied monitor lock).
   NOTE(review): this extract skips original lines; the closing brace
   (orig. line 64) is not visible here. */
62 static inline void sv_lock(sv_t *sv) {
63 spin_lock(&sv->sv_lock);
/* Release the sv's internal spinlock; pairs with sv_lock(). */
66 static inline void sv_unlock(sv_t *sv) {
67 spin_unlock(&sv->sv_lock);
70 /* up() is "extern inline", so we can't pass its address to sv_wait.
71 Use this function's address instead. */
/* Out-of-line wrapper giving "up" an addressable function; installed
   as the monitor-unlock callback for SV_MON_SEMA in sv_set_mon_type().
   NOTE(review): body (presumably up(sem)) not visible in this extract. */
72 static void up_wrapper(struct semaphore *sem) {
76 /* spin_unlock() is sometimes a macro. */
/* Out-of-line wrapper so spin_unlock can be stored as a function
   pointer; installed as the monitor-unlock callback for SV_MON_SPIN.
   NOTE(review): body not visible in this extract. */
77 static void spin_unlock_wrapper(spinlock_t *s) {
81 /* XXX Perhaps sv_wait() should do the switch() each time and avoid
82 the extra indirection and the need for the _wrapper functions? */
/* Record the monitor-lock type in sv->sv_flags and install the matching
   unlock callback used by sv_wait().
   NOTE(review): the switch/case scaffolding is missing from this
   extract; the per-case structure below is inferred from fragments. */
84 static inline void sv_set_mon_type(sv_t *sv, int type) {
/* SV_MON_SPIN case (presumably): monitor is a spinlock. */
87 sv->sv_mon_unlock_func =
88 (sv_mon_unlock_func_t)spin_unlock_wrapper;
/* SV_MON_SEMA case (presumably): monitor is a semaphore. */
91 sv->sv_mon_unlock_func =
92 (sv_mon_unlock_func_t)up_wrapper;
/* A semaphore monitor cannot be shared with interrupt or
   bottom-half context, so reject SV_INTS/SV_BHS here. */
93 if(sv->sv_flags & SV_INTS) {
94 printk(KERN_ERR "sv_set_mon_type: The monitor lock "
95 "cannot be shared with interrupts if it is a "
99 if(sv->sv_flags & SV_BHS) {
100 printk(KERN_ERR "sv_set_mon_type: The monitor lock "
101 "cannot be shared with bottom-halves if it is "
/* Comment fragment about a possible custom monitor type: */
108 * If needed, and will need to think about interrupts. This
109 * may be needed, for example, if someone wants to use sv's
110 * with something like dev_base; writers need to hold two
/* Custom monitor: copy the caller-provided unlock callback and lock.
   NOTE(review): the enclosing case label is not visible; this may sit
   inside a commented-out region in the original. */
115 struct sv_mon_custom *c = lock;
116 sv->sv_mon_unlock_func = c->sv_mon_unlock_func;
117 sv->sv_mon_lock = c->sv_mon_lock;
/* Unknown type (presumably the default case): complain loudly. */
123 printk(KERN_ERR "sv_set_mon_type: unknown type %d (0x%x)! "
124 "(flags 0x%x)\n", type, type, sv->sv_flags);
/* Remember the type alongside the other flags. */
128 sv->sv_flags |= type;
/* Validate and store the wakeup order (SV_ORDER_FIFO vs SV_ORDER_LIFO),
   defaulting to SV_ORDER_DEFAULT when no order was requested.
   NOTE(review): the surrounding condition/assignment lines are missing
   from this extract. */
131 static inline void sv_set_ord(sv_t *sv, int ord) {
133 ord = SV_ORDER_DEFAULT;
135 if (ord != SV_ORDER_FIFO && ord != SV_ORDER_LIFO) {
136 printk(KERN_EMERG "sv_set_ord: unknown order %d (0x%x)! ",
/* Initialize a synchronization variable.
 *
 * sv    - the sv to initialize
 * lock  - the monitor lock associated with the sv (may be NULL; see
 *         the comment below)
 * flags - OR of an SV_ORDER_* value, an SV_MON_* monitor type, and
 *         behavior flags such as SV_INTS / SV_BHS
 */
144 void sv_init(sv_t *sv, sv_mon_lock_t *lock, int flags)
146 int ord = flags & SV_ORDER_MASK;
147 int type = flags & SV_MON_MASK;
149 /* Copy all non-order, non-type flags */
150 sv->sv_flags = (flags & ~(SV_ORDER_MASK | SV_MON_MASK));
/* SV_INTS and SV_BHS are mutually exclusive; SV_INTS alone suffices. */
152 if((sv->sv_flags & (SV_INTS | SV_BHS)) == (SV_INTS | SV_BHS)) {
153 printk(KERN_ERR "sv_init: do not set both SV_INTS and SV_BHS, only SV_INTS.\n");
158 sv_set_mon_type(sv, type);
160 /* If lock is NULL, we'll get it from sv_wait_compat() (and
161 ignore it in sv_signal() and sv_broadcast()). */
162 sv->sv_mon_lock = lock;
164 spin_lock_init(&sv->sv_lock);
165 init_waitqueue_head(&sv->sv_waiters);
/* sv_wait - atomically release the monitor lock and sleep on the sv. */
169 * The associated lock must be locked on entry. It is unlocked on return.
173 * n < 0 : interrupted, -n jiffies remaining on timeout, or -1 if timeout == 0
174 * n = 0 : timeout expired
175 * n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
177 signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout)
179 DECLARE_WAITQUEUE( wait, current );
/* Sanity-check the interrupt-enable state against SV_INTS (debug
   builds only): SV_INTS callers must have interrupts disabled, and
   non-SV_INTS callers are warned if they don't. */
183 #ifdef SV_DEBUG_INTERRUPT_STATE
188 if(sv->sv_flags & SV_INTS) {
189 if(SV_TEST_INTERRUPTS_ENABLED(flags)) {
190 printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
191 "enabled (flags: 0x%lx)\n", flags);
195 if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
196 printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
197 "disabled! (flags: 0x%lx)\n", flags);
201 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* Release the caller's monitor lock via the callback installed by
   sv_set_mon_type(). */
205 sv->sv_mon_unlock_func(sv->sv_mon_lock);
207 /* Add ourselves to the wait queue and set the state before
208 * releasing the sv_lock so as to avoid racing with the
209 * wake_up() in sv_signal() and sv_broadcast().
212 /* don't need the _irqsave part, but there is no wq_write_lock() */
213 wq_write_lock_irqsave(&sv->sv_waiters.lock, flags);
215 #ifdef EXCLUSIVE_IN_QUEUE
216 wait.flags |= WQ_FLAG_EXCLUSIVE;
/* Queue position implements wakeup order: tail for FIFO (presumably),
   head for LIFO; case labels are missing from this extract. */
219 switch(sv->sv_flags & SV_ORDER_MASK) {
221 __add_wait_queue_tail(&sv->sv_waiters, &wait);
224 __add_wait_queue(&sv->sv_waiters, &wait);
227 printk(KERN_ERR "sv_wait: unknown order! (sv: 0x%p, flags: 0x%x)\n",
228 (void *)sv, sv->sv_flags);
231 wq_write_unlock_irqrestore(&sv->sv_waiters.lock, flags);
/* SV_WAIT_SIG selects an interruptible sleep. */
233 if(sv_wait_flags & SV_WAIT_SIG)
234 set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE );
236 set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);
238 spin_unlock(&sv->sv_lock);
/* Re-enable interrupts/BHs before sleeping, as appropriate for the
   caller's context flags.  NOTE(review): the enable calls themselves
   (orig. lines 241/243) are not visible in this extract. */
240 if(sv->sv_flags & SV_INTS)
242 else if(sv->sv_flags & SV_BHS)
246 ret = schedule_timeout(timeout);
250 if(current->state != TASK_RUNNING) /* XXX Is this possible? */ {
251 printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
253 set_current_state(TASK_RUNNING);
256 remove_wait_queue(&sv->sv_waiters, &wait);
/* We may have been woken: */
259 - woken by a sv_signal/sv_broadcast
261 - woken by timeout expiring
264 /* XXX This isn't really accurate; we may have been woken
265 before the signal anyway.... */
266 if(signal_pending(current))
267 return timeout ? -ret : -1;
268 return timeout ? ret : 1;
/* Wake one waiter on the sv.  The caller must hold the monitor lock. */
272 void sv_signal(sv_t *sv)
274 /* If interrupts can acquire this lock, they can also acquire the
275 sv_mon_lock, which we must already have to have called this, so
276 interrupts must be disabled already. If interrupts cannot
277 contend for this lock, we don't have to worry about it. */
279 #ifdef SV_DEBUG_INTERRUPT_STATE
280 if(sv->sv_flags & SV_INTS) {
283 if(SV_TEST_INTERRUPTS_ENABLED(flags))
284 printk(KERN_ERR "sv_signal: SV_INTS and "
285 "interrupts enabled! (flags: 0x%lx)\n", flags);
287 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* Exclusive waiters mean wake_up() releases only one of them. */
290 wake_up(&sv->sv_waiters);
/* Wake all waiters on the sv.  Same locking rules as sv_signal(). */
294 void sv_broadcast(sv_t *sv)
296 #ifdef SV_DEBUG_INTERRUPT_STATE
297 if(sv->sv_flags & SV_INTS) {
300 if(SV_TEST_INTERRUPTS_ENABLED(flags))
301 printk(KERN_ERR "sv_broadcast: SV_INTS and "
302 "interrupts enabled! (flags: 0x%lx)\n", flags);
304 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* wake_up_all() ignores WQ_FLAG_EXCLUSIVE and wakes everyone. */
307 wake_up_all(&sv->sv_waiters);
/* Tear down an sv.  Best-effort check that nobody holds its internal
   lock; note the trylock is never released on success (the sv is dead). */
311 void sv_destroy(sv_t *sv)
313 if(!spin_trylock(&sv->sv_lock)) {
314 printk(KERN_ERR "sv_destroy: someone else has sv 0x%p locked!\n", (void *)sv);
318 /* XXX Check that the waitqueue is empty?
319 Mark the sv destroyed?
/* Test rendezvous semaphores: "talkback" reports completion to
   sv_test(); "sem" orders the two test-1 threads. */
326 static DECLARE_MUTEX_LOCKED(talkback);
327 static DECLARE_MUTEX_LOCKED(sem);
/* Test 1, waiter side: take the test spinlock, release sv_test_1_s via
   the semaphore (presumably; the up() is not visible), then block in
   sv_spin_wait() until signalled. */
331 static int sv_test_1_w(void *arg)
333 printk("sv_test_1_w: acquiring spinlock 0x%p...\n", arg);
335 spin_lock((spinlock_t*)arg);
336 printk("sv_test_1_w: spinlock acquired, waking sv_test_1_s.\n");
340 printk("sv_test_1_w: sv_spin_wait()'ing.\n");
342 sv_spin_wait(&sv, arg);
344 printk("sv_test_1_w: talkback.\n");
347 printk("sv_test_1_w: exiting.\n");
/* Test 1, signaller side: wait until sv_test_1_w is queued, then take
   the spinlock and sv_signal() it awake. */
351 static int sv_test_1_s(void *arg)
353 printk("sv_test_1_s: waiting for semaphore.\n");
355 printk("sv_test_1_s: semaphore acquired. Acquiring spinlock.\n");
356 spin_lock((spinlock_t*)arg);
357 printk("sv_test_1_s: spinlock acquired. sv_signaling.\n");
359 printk("sv_test_1_s: talkback.\n");
361 printk("sv_test_1_s: exiting.\n");
/* Semaphore used as the monitor lock for the SV_MON_SEMA tests. */
367 static DECLARE_MUTEX(monitor);
/* Test 2 worker: block in sv_sema_wait() until woken by one of the
   sv_test_2_s* threads.  "dummy" (declaration not visible) tags the
   thread in the log output. */
369 static int sv_test_2_w(void *arg)
372 sv_t *sv = (sv_t *)arg;
376 printk("sv_test_2_w: thread %d started, sv_waiting.\n", dummy);
377 sv_sema_wait(sv, &monitor);
378 printk("sv_test_2_w: thread %d woken, exiting.\n", dummy);
/* Test 2, first wake-up thread: signal the three workers one at a
   time, then signal/broadcast an empty sv (expecting no effect), then
   report back via "talkback". */
383 static int sv_test_2_s_1(void *arg)
386 sv_t *sv = (sv_t *)arg;
389 for(i = 0; i < 3; i++) {
390 printk("sv_test_2_s_1: waking one thread.\n");
395 printk("sv_test_2_s_1: signaling and broadcasting again. Nothing should happen.\n");
401 printk("sv_test_2_s_1: talkbacking.\n");
/* Test 2/3 wake-up thread: signal three workers individually, then
   broadcast to release the rest.
   NOTE(review): the later printk prefixes say "sv_test_3_s" although
   the function is sv_test_2_s — looks like a copy/paste slip in the
   log strings; cannot be changed in a comment-only edit. */
407 static int sv_test_2_s(void *arg)
410 sv_t *sv = (sv_t *)arg;
413 for(i = 0; i < 3; i++) {
414 printk("sv_test_2_s: waking one thread (should be %d.)\n", i);
419 printk("sv_test_3_s: waking remaining threads with broadcast.\n");
424 printk("sv_test_3_s: sending talkback.\n");
427 printk("sv_test_3_s: exiting.\n");
/* Drive the test-2 scenario against the given sv: spawn 3 workers and
   one-at-a-time waker (sv_test_2_s_1), then 10 more workers and the
   signal-then-broadcast waker (sv_test_2_s). */
433 static void big_test(sv_t *sv)
439 for(i = 0; i < 3; i++) {
440 printk("big_test: spawning thread %d.\n", i);
441 kernel_thread(sv_test_2_w, sv, 0);
445 printk("big_test: spawning first wake-up thread.\n");
446 kernel_thread(sv_test_2_s_1, sv, 0);
449 printk("big_test: talkback happened.\n");
452 for(i = 3; i < 13; i++) {
453 printk("big_test: spawning thread %d.\n", i);
454 kernel_thread(sv_test_2_w, sv, 0);
458 printk("big_test: spawning wake-up thread.\n");
459 kernel_thread(sv_test_2_s, sv, 0);
/* Shared state for the SV_INTS stress test: the monitor spinlock and a
   counter handing each worker a distinct id. */
465 spinlock_t int_test_spin = SPIN_LOCK_UNLOCKED;
467 static int irqtestcount;
/* SV_INTS stress worker: repeatedly take int_test_spin (alternating,
   presumably, between _irqsave and _irq variants), sv_wait() on
   int_test_sv, then nap for a pseudo-random few jiffies.  Logs the
   saved flags (flags2) so interrupt state can be eyeballed. */
469 static int interrupt_test_worker(void *unused)
471 int id = ++irqtestcount;
473 unsigned long flags, flags2;
475 printk("ITW: thread %d started.\n", id);
478 __save_flags(flags2);
480 printk("ITW %2d %5d: irqsaving (%lx)\n", id, it, flags2);
481 spin_lock_irqsave(&int_test_spin, flags);
483 printk("ITW %2d %5d: spin_lock_irqing (%lx)\n", id, it, flags2);
484 spin_lock_irq(&int_test_spin);
487 __save_flags(flags2);
488 printk("ITW %2d %5d: locked, sv_waiting (%lx).\n", id, it, flags2);
489 sv_wait(&int_test_sv, 0, 0);
491 __save_flags(flags2);
492 printk("ITW %2d %5d: wait finished (%lx), pausing\n", id, it, flags2);
493 set_current_state(TASK_INTERRUPTIBLE);
494 schedule_timeout(jiffies & 0xf);
495 if(current->state != TASK_RUNNING)
496 printk("ITW: current->state isn't RUNNING after schedule!\n");
/* Set up the SV_INTS test: init int_test_sv with a spinlock monitor
   shared with interrupts, then spawn the worker threads. */
501 static void interrupt_test(void)
505 printk("interrupt_test: initing sv.\n");
506 sv_init(&int_test_sv, &int_test_spin, SV_MON_SPIN | SV_INTS);
508 for(i = 0; i < SV_INTERRUPT_TEST_WORKERS; i++) {
509 printk("interrupt_test: starting test thread %d.\n", i);
510 kernel_thread(interrupt_test_worker, 0, 0);
512 printk("interrupt_test: done with init part.\n");
/* Body of the top-level self-test driver (presumably sv_test(); its
   signature line is missing from this extract): run the test-1 pair on
   a spinlock-monitor sv, then big_test() on a semaphore-monitor sv and
   on a FILO-ordered one. */
518 spinlock_t s = SPIN_LOCK_UNLOCKED;
520 sv_init(&sv, &s, SV_MON_SPIN);
521 printk("sv_test: starting sv_test_1_w.\n");
522 kernel_thread(sv_test_1_w, &s, 0);
523 printk("sv_test: starting sv_test_1_s.\n");
524 kernel_thread(sv_test_1_s, &s, 0);
526 printk("sv_test: waiting for talkback.\n");
/* Both test-1 threads up() talkback once each, hence two down()s. */
527 down(&talkback); down(&talkback);
528 printk("sv_test: talkback happened, sv_destroying.\n");
533 printk("sv_test: beginning big_test on sv.\n");
535 sv_init(&sv, &monitor, SV_MON_SEMA);
539 printk("sv_test: beginning big_test on sv_filo.\n");
540 sv_init(&sv_filo, &monitor, SV_MON_SEMA | SV_ORDER_FILO);
542 sv_destroy(&sv_filo);
546 printk("sv_test: done.\n");
552 #endif /* RUN_SV_TEST */