#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>	/* for BUG() */
#include <linux/config.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

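/*
 * Usage sketch (illustrative, not part of this header; names are made
 * up): a lock can be initialized statically or at run time.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	...
 *	spinlock_t *lp = &some_struct->lock;
 *	spin_lock_init(lp);
 */
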
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

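/*
 * Note: the lock byte holds 1 when free; spin_lock decrements it, so it
 * is 0 while held and can go negative under contention.  Hence the
 * signed-char "<= 0" test above means "currently locked".
 */
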
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"cmpb $0,%0\n\t" \
	"rep;nop\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END

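/*
 * How the string above works: "lock ; decb" atomically drops the byte
 * from 1 to 0; if the result is negative (sign flag set), somebody else
 * held the lock, so we jump to the out-of-line section and spin reading
 * the byte ("rep;nop" is the PAUSE hint) until it goes positive, then
 * retry the atomic decrement.
 */
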
/*
 * This works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->lock) : : "memory"

static inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else

#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->lock) \
		:"0" (oldval) : "memory"

static inline void spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif

static inline int spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

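/*
 * Usage sketch (illustrative; my_rwlock is a made-up name): readers may
 * nest freely, a writer excludes everyone.
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&my_rwlock);
 *	... readers only ...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	... exclusive access ...
 *	write_unlock(&my_rwlock);
 */
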
/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
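/*
 * Worked example of the bias scheme (RW_LOCK_BIAS is 0x01000000, from
 * <asm/rwlock.h>): the counter starts at RW_LOCK_BIAS.  Each reader
 * subtracts 1 and succeeds as long as the result stays non-negative; a
 * writer subtracts the whole RW_LOCK_BIAS and succeeds only if the
 * result is exactly 0, i.e. there were no readers and no other writer.
 * A writer therefore drives the counter negative for any reader that
 * arrives afterwards, which is the "contended" sign bit above.
 */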
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */

static inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

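/*
 * Note on write_trylock(): atomic_sub_and_test() returns true only if
 * subtracting RW_LOCK_BIAS left the counter at 0, i.e. the lock was
 * completely free; on failure the bias is added straight back, so the
 * counter is only transiently disturbed.
 */
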
#endif /* __ASM_SPINLOCK_H */