1 #ifndef __ASM_SH64_SEMAPHORE_HELPER_H
2 #define __ASM_SH64_SEMAPHORE_HELPER_H
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/semaphore-helper.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 */

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
28 static __inline__ void wake_one_more(struct semaphore * sem)
30 atomic_inc((atomic_t *)&sem->sleepers);
33 static __inline__ int waking_non_zero(struct semaphore *sem)
38 spin_lock_irqsave(&semaphore_wake_lock, flags);
39 if (sem->sleepers > 0) {
43 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
57 static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
58 struct task_struct *tsk)
63 spin_lock_irqsave(&semaphore_wake_lock, flags);
64 if (sem->sleepers > 0) {
67 } else if (signal_pending(tsk)) {
68 atomic_inc(&sem->count);
71 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count down_trylock() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
84 static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
89 spin_lock_irqsave(&semaphore_wake_lock, flags);
90 if (sem->sleepers <= 0)
91 atomic_inc(&sem->count);
96 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 #endif /* __ASM_SH64_SEMAPHORE_HELPER_H */