/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC_SPINLOCK_H
7 #define __SPARC_SPINLOCK_H
9 #include <linux/threads.h> /* For NR_CPUS */
/* Define this to use the verbose/debugging versions in
 * arch/sparc/lib/debuglocks.c
 *
 * Be sure to make dep whenever changing this option.
 */
21 #define SPIN_LOCK_DEBUG
23 #ifdef SPIN_LOCK_DEBUG
/* Debug build: the spinlock records the program counter of its current
 * owner, and every operation is routed through the out-of-line helpers
 * in arch/sparc/lib/debuglocks.c (see externs below).
 */
struct _spinlock_debug {
	/* NOTE(review): this listing appears truncated — the lock byte
	 * member (spin_is_locked below reads ->lock) and the closing "};"
	 * seem to be missing here. */
	unsigned long owner_pc;	/* where the lock was taken */
typedef struct _spinlock_debug spinlock_t;

/* Static initializer: lock free, no recorded owner PC. */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 }
#define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
/* Held iff the lock byte is non-zero; read through a volatile pointer
 * so the test is re-done on every evaluation. */
#define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0)
/* Busy-wait (barrier() each pass) until the lock byte reads as zero. */
#define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))

/* Out-of-line debug implementations; "str" names the calling operation
 * for diagnostic output. */
extern void _do_spin_lock(spinlock_t *lock, char *str);
extern int _spin_trylock(spinlock_t *lock);
extern void _do_spin_unlock(spinlock_t *lock);

#define spin_trylock(lp) _spin_trylock(lp)
#define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
#define spin_unlock(lock) _do_spin_unlock(lock)
/* Debug build rwlock: lock word plus bookkeeping — the writer's PC and,
 * per CPU, the PC of the reader holding the lock (hence NR_CPUS from
 * <linux/threads.h>). */
struct _rwlock_debug {
	volatile unsigned int lock;
	unsigned long owner_pc;		/* writer's program counter */
	unsigned long reader_pc[NR_CPUS];	/* per-CPU reader program counter */
/* NOTE(review): the closing "};" of struct _rwlock_debug appears to be
 * missing from this listing. */
typedef struct _rwlock_debug rwlock_t;

/* Static initializer: lock word, owner_pc, and reader_pc[] all zero. */
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)

/* Out-of-line debug implementations; "str" names the caller for reports. */
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
/* Debug read_lock: disable local interrupts around the out-of-line
 * helper so the lock state and per-CPU bookkeeping are updated without
 * interruption on this CPU.
 * NOTE(review): the terminating "} while(0)" line of this macro appears
 * to be missing from this listing. */
#define read_lock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
/* Debug read_unlock: same IRQ-off bracket as read_lock above.
 * NOTE(review): the terminating "} while(0)" line of this macro appears
 * to be missing from this listing. */
#define read_unlock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
/* Debug write_lock: interrupts disabled across the out-of-line helper.
 * NOTE(review): the terminating "} while(0)" line of this macro appears
 * to be missing from this listing. */
#define write_lock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
/* Debug write_unlock: interrupts disabled across the out-of-line helper
 * (which, unlike the others, takes no caller-name string).
 * NOTE(review): the terminating "} while(0)" line of this macro appears
 * to be missing from this listing. */
#define write_unlock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
87 #else /* !SPIN_LOCK_DEBUG */
/* Production build: a spinlock is a single byte — 0 = free, non-zero =
 * held (set atomically by ldstub in spin_lock below). */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED 0

#define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
/* Volatile read so the lock byte is re-fetched on every evaluation. */
#define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

/* Busy-wait until the lock byte reads as zero.
 * NOTE(review): the "do {" / barrier() lines of this macro appear to be
 * missing from this listing — only the loop condition survives. */
#define spin_unlock_wait(lock) \
} while(*((volatile unsigned char *)lock))
/* Acquire: ldstub atomically reads the lock byte and sets it to 0xff;
 * if the old value (in %g2) was non-zero the lock was held, so spin
 * re-reading with plain ldub (cheaper than repeated ldstub) until it
 * looks free, then retry the ldstub.
 * NOTE(review): this listing appears truncated — the function braces,
 * the branch instructions between the orcc/ldub pairs, and the asm
 * output/input operand lines seem to be missing. */
extern __inline__ void spin_lock(spinlock_t *lock)
	__asm__ __volatile__(
	"ldstub [%0], %%g2\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	" ldub [%0], %%g2\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	" ldub [%0], %%g2\n\t"
	: "g2", "memory", "cc");
/* Try-acquire: single ldstub; returns non-zero (success) iff the old
 * lock byte was 0, i.e. the lock was free and is now ours.
 * NOTE(review): this listing appears truncated — the function braces,
 * the declaration of "result", and the asm output/input operand lines
 * seem to be missing. */
extern __inline__ int spin_trylock(spinlock_t *lock)
	__asm__ __volatile__("ldstub [%1], %0"
	return (result == 0);
/* Release: store zero to the lock byte; the "memory" clobber stops the
 * compiler from sinking critical-section accesses past the store.
 * NOTE(review): the function's opening and closing braces appear to be
 * missing from this listing. */
extern __inline__ void spin_unlock(spinlock_t *lock)
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
135 /* Read-write spinlocks, allowing multiple readers
136 * but only one writer.
138 * NOTE! it is quite common to have readers in interrupts
139 * but no interrupt writers. For those circumstances we
140 * can "mix" irq-safe locks - any writer needs to get a
141 * irq-safe write-lock, but readers can get non-irqsafe
144 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
/* Production rwlock: a single word holding a 24-bit reader counter plus
 * a write-lock byte (layout described in the comment below). */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
154 /* Sort of like atomic_t's on Sparc, but even more clever.
156 * ------------------------------------
157 * | 24-bit counter | wlock | rwlock_t
158 * ------------------------------------
161 * wlock signifies the one writer is in or somebody is updating
162 * counter. For a writer, if he successfully acquires the wlock,
163 * but counter is non-zero, he has to release the lock and wait,
164 * till both counter and wlock are zero.
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
/* Low-level read acquire: ldstub on byte 3 of the lock word grabs the
 * wlock byte (blocking writers and other counter updaters), then the
 * call drops into the assembler helper ___rw_read_enter — presumably it
 * adjusts the 24-bit reader count and releases the byte; the lock
 * pointer travels in %g1 by convention (see the register binding).
 * NOTE(review): this listing appears truncated — the function braces,
 * the "lp = rw;" style assignment, and the asm operand lists seem to be
 * missing. */
extern __inline__ void _read_lock(rwlock_t *rw)
	register rwlock_t *lp asm("g1");
	__asm__ __volatile__(
	"call ___rw_read_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "g2", "g4", "memory", "cc");

/* IRQ-safe wrapper: readers may run in interrupt context, so disable
 * local interrupts around the low-level acquire.
 * NOTE(review): the "_read_lock(lock);" body line and the terminating
 * "} while(0)" appear to be missing from this listing. */
#define read_lock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	__restore_flags(flags); \
/* Low-level read release: same shape as _read_lock — ldstub on the
 * wlock byte (byte 3), then tail into the assembler helper
 * ___rw_read_exit, presumably to decrement the reader count; lock
 * pointer passed in %g1.
 * NOTE(review): this listing appears truncated — the function braces,
 * the pointer assignment, and the asm operand lists seem to be missing. */
extern __inline__ void _read_unlock(rwlock_t *rw)
	register rwlock_t *lp asm("g1");
	__asm__ __volatile__(
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "g2", "g4", "memory", "cc");

/* IRQ-safe wrapper around the low-level read release.
 * NOTE(review): the terminating "} while(0)" of this macro appears to
 * be missing from this listing. */
#define read_unlock(lock) \
do { unsigned long flags; \
	__save_and_cli(flags); \
	_read_unlock(lock); \
	__restore_flags(flags); \
/* Write acquire: ldstub grabs the wlock byte, then the assembler helper
 * ___rw_write_enter presumably spins until the reader count is also
 * zero (per the layout comment above); lock pointer passed in %g1.
 * Note: no IRQ-safe macro wrapper here — callers needing one must
 * disable interrupts themselves (cf. the "mix" note above).
 * NOTE(review): this listing appears truncated — the function braces,
 * the pointer assignment, and the asm operand lists seem to be missing. */
extern __inline__ void write_lock(rwlock_t *rw)
	register rwlock_t *lp asm("g1");
	__asm__ __volatile__(
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "g2", "g4", "memory", "cc");
221 #define write_unlock(rw) do { (rw)->lock = 0; } while(0)
223 #endif /* SPIN_LOCK_DEBUG */
225 #endif /* !(__ASSEMBLY__) */
227 #endif /* __SPARC_SPINLOCK_H */