#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

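/*
 * Example initialization (an illustrative sketch; "my_lock" and "dev"
 * are made-up names).  Statically allocated locks can use the
 * initializer directly:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * while locks embedded in dynamically allocated objects must be set
 * up at run time before first use:
 *
 *	spin_lock_init(&dev->lock);
 */
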
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

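/*
 * Lock byte encoding: the byte starts out at 1 (unlocked).  The
 * "lock ; decb" below takes it to 0 on an uncontended acquire, and
 * contending CPUs drive it negative, hence the "<= 0" test above;
 * the unlock path simply stores 1 again.
 */
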
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END

/*
 * This works, despite all the confusion: a plain byte store is a
 * valid unlock on x86, since stores are not reordered against
 * older stores.
 */

#define spin_unlock_string \
	"movb $1,%0"

static inline int spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}

static inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
		:"=m" (lock->lock) : : "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.  (See the sketch after rwlock_init() below.)
 */
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

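/*
 * Mixing irq-safe and plain locks, as described in the NOTE above (a
 * sketch; write_lock_irqsave()/write_unlock_irqrestore() come from
 * <linux/spinlock.h>, and "tbl_lock" is a made-up name):
 *
 *	rwlock_t tbl_lock = RW_LOCK_UNLOCKED;
 *	unsigned long flags;
 *
 *	read_lock(&tbl_lock);			readers, even in irqs
 *	...
 *	read_unlock(&tbl_lock);
 *
 *	write_lock_irqsave(&tbl_lock, flags);	every writer
 *	...
 *	write_unlock_irqrestore(&tbl_lock, flags);
 */
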
/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/x86_64/kernel/semaphore.S */

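/*
 * The counter arithmetic, concretely (RW_LOCK_BIAS is 0x01000000 in
 * <asm/rwlock.h>):
 *
 *	unlocked	count == 0x01000000
 *	N readers	count == 0x01000000 - N
 *	writer		count == 0x00000000
 *
 * A read_lock subtracts 1 and fails if the result went negative (a
 * writer already subtracted the whole bias); a write_lock subtracts
 * RW_LOCK_BIAS and succeeds only if the result is exactly zero, i.e.
 * there were no readers and no other writer.
 */
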
extern inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

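/*
 * Example (a sketch; the irq-disabling variants live in
 * <linux/spinlock.h>):
 *
 *	if (write_trylock(&tbl_lock)) {
 *		... exclusive access ...
 *		write_unlock(&tbl_lock);
 *	}
 */
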
#endif /* __ASM_SPINLOCK_H */