/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#include <linux/threads.h>      /* For NR_CPUS */

#ifndef __ASSEMBLY__

#include <asm/psr.h>

/*
 * Define this to use the verbose/debugging versions in
 * arch/sparc/lib/debuglocks.c
 *
 * Be sure to make dep whenever changing this option.
 */
#define SPIN_LOCK_DEBUG

#ifdef SPIN_LOCK_DEBUG
struct _spinlock_debug {
        unsigned char lock;
        unsigned long owner_pc;
};
typedef struct _spinlock_debug spinlock_t;

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0, 0 }
#define spin_lock_init(lp)      do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)  (*((volatile unsigned char *)(&((lp)->lock))) != 0)
#define spin_unlock_wait(lp)    do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))

extern void _do_spin_lock(spinlock_t *lock, char *str);
extern int _spin_trylock(spinlock_t *lock);
extern void _do_spin_unlock(spinlock_t *lock);

#define spin_trylock(lp)        _spin_trylock(lp)
#define spin_lock(lock)         _do_spin_lock(lock, "spin_lock")
#define spin_unlock(lock)       _do_spin_unlock(lock)
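
/* Illustrative sketch: typical use of the spin_lock()/spin_unlock() pair
 * defined above.  The names my_lock and my_counter are hypothetical, and
 * the block is compiled out; it only documents the usual pattern.
 */
#if 0
static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
static int my_counter;

static void my_counter_inc(void)
{
        spin_lock(&my_lock);            /* spins until the lock is ours */
        my_counter++;                   /* critical section */
        spin_unlock(&my_lock);          /* lets the next CPU in */
}
#endif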

struct _rwlock_debug {
        volatile unsigned int lock;
        unsigned long owner_pc;
        unsigned long reader_pc[NR_CPUS];
};
typedef struct _rwlock_debug rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);

#define read_lock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _do_read_lock(lock, "read_lock"); \
        __restore_flags(flags); \
} while(0)

#define read_unlock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _do_read_unlock(lock, "read_unlock"); \
        __restore_flags(flags); \
} while(0)

#define write_lock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _do_write_lock(lock, "write_lock"); \
        __restore_flags(flags); \
} while(0)

#define write_unlock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _do_write_unlock(lock); \
        __restore_flags(flags); \
} while(0)

#else /* !SPIN_LOCK_DEBUG */

typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED      0

#define spin_lock_init(lock)   (*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)    (*((volatile unsigned char *)(lock)) != 0)

#define spin_unlock_wait(lock) \
do { \
        barrier(); \
} while(*((volatile unsigned char *)lock))

extern __inline__ void spin_lock(spinlock_t *lock)
{
        __asm__ __volatile__(
        "\n1:\n\t"
        "ldstub [%0], %%g2\n\t"
        "orcc   %%g2, 0x0, %%g0\n\t"
        "bne,a  2f\n\t"
        " ldub  [%0], %%g2\n\t"
        ".subsection    2\n"
        "2:\n\t"
        "orcc   %%g2, 0x0, %%g0\n\t"
        "bne,a  2b\n\t"
        " ldub  [%0], %%g2\n\t"
        "b,a    1b\n\t"
        ".previous\n"
        : /* no outputs */
        : "r" (lock)
        : "g2", "memory", "cc");
}
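
/* A C-level sketch of the spin loop above.  __ldstub() is a hypothetical
 * stand-in for the atomic ldstub instruction (store 0xff into the byte
 * and return its old value); the real lock has to stay in assembly so the
 * test-and-set remains a single atomic instruction.  Compiled out.
 */
#if 0
extern unsigned char __ldstub(volatile unsigned char *p);      /* hypothetical */

static void spin_lock_sketch(spinlock_t *lock)
{
        while (__ldstub(lock) != 0) {
                /* Lock was held: spin with plain loads (the ldub in the
                 * out-of-line loop) until it looks free, then retry the
                 * atomic test-and-set.
                 */
                while (*(volatile unsigned char *)lock != 0)
                        ;
        }
}
#endif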

extern __inline__ int spin_trylock(spinlock_t *lock)
{
        unsigned int result;
        __asm__ __volatile__("ldstub [%1], %0"
                             : "=r" (result)
                             : "r" (lock)
                             : "memory");
        return (result == 0);
}
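
/* Illustrative sketch: spin_trylock() returns non-zero when the lock was
 * taken and zero when somebody else already holds it.  The names my_lock
 * and my_try_work() are hypothetical; the block is compiled out.
 */
#if 0
static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

static int my_try_work(void)
{
        if (!spin_trylock(&my_lock))
                return 0;               /* lock busy, caller should back off */
        /* ... critical section ... */
        spin_unlock(&my_lock);
        return 1;
}
#endif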

extern __inline__ void spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
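
/* Illustrative sketch of the reader-in-interrupt pattern described in the
 * comment above.  The names my_rwlock, my_irq_handler() and my_update()
 * are hypothetical; write_lock_irqsave()/write_unlock_irqrestore() come
 * from <linux/spinlock.h>.  The writer keeps interrupts off while holding
 * the lock, so a reader running from irq context can use the plain
 * read_lock()/read_unlock() defined below.  Compiled out.
 */
#if 0
static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;

static void my_irq_handler(void)        /* reader, irq context */
{
        read_lock(&my_rwlock);
        /* ... walk the shared data ... */
        read_unlock(&my_rwlock);
}

static void my_update(void)             /* writer, process context */
{
        unsigned long flags;

        write_lock_irqsave(&my_rwlock, flags);
        /* ... modify the shared data ... */
        write_unlock_irqrestore(&my_rwlock, flags);
}
#endif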

/* Sort of like atomic_t's on Sparc, but even more clever.
 *
 *      ------------------------------------
 *      | 24-bit counter           | wlock |  rwlock_t
 *      ------------------------------------
 *       31                       8 7     0
 *
 * wlock signifies that a writer is in, or that somebody is updating the
 * counter.  A writer that successfully acquires wlock but finds the
 * counter non-zero has to release the lock and wait until both the
 * counter and wlock are zero before trying again.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
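
/* Worked example of the layout above, with hypothetical values: three
 * readers holding the lock while a writer's ldstub has set the wlock byte
 * gives a word of (3 << 8) | 0xff == 0x3ff.  The two fields are
 *
 *      reader count:   lock >> 8       (bits 31..8)
 *      wlock byte:     lock & 0xff     (bits  7..0)
 *
 * On big-endian sparc the wlock byte is the last byte of the word, which
 * is why the helpers below do ldstub [%g1 + 3].
 */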
extern __inline__ void _read_lock(rwlock_t *rw)
{
        register rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___rw_read_enter\n\t"
        " ldstub        [%%g1 + 3], %%g2\n"
        : /* no outputs */
        : "r" (lp)
        : "g2", "g4", "memory", "cc");
}

#define read_lock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _read_lock(lock); \
        __restore_flags(flags); \
} while(0)

extern __inline__ void _read_unlock(rwlock_t *rw)
{
        register rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___rw_read_exit\n\t"
        " ldstub        [%%g1 + 3], %%g2\n"
        : /* no outputs */
        : "r" (lp)
        : "g2", "g4", "memory", "cc");
}

#define read_unlock(lock) \
do {    unsigned long flags; \
        __save_and_cli(flags); \
        _read_unlock(lock); \
        __restore_flags(flags); \
} while(0)

extern __inline__ void write_lock(rwlock_t *rw)
{
        register rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___rw_write_enter\n\t"
        " ldstub        [%%g1 + 3], %%g2\n"
        : /* no outputs */
        : "r" (lp)
        : "g2", "g4", "memory", "cc");
}

#define write_unlock(rw)        do { (rw)->lock = 0; } while(0)

#endif /* SPIN_LOCK_DEBUG */

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */