include/asm-ppc/spinlock.h (linux-2.4.21-pre4)
/*
 * BK Id: SCCS/s.spinlock.h 1.19 09/11/02 14:55:14 paulus
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>

#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG 1
#else
#define SPINLOCK_DEBUG 0
#endif

/*
 * Simple spin lock operations.
 */

typedef struct {
	volatile unsigned long lock;
#if SPINLOCK_DEBUG
	volatile unsigned long owner_pc;	/* where the holder took the lock */
	volatile unsigned long owner_cpu;	/* which CPU holds the lock */
#endif
} spinlock_t;

#ifdef __KERNEL__
#if SPINLOCK_DEBUG
#define SPINLOCK_DEBUG_INIT	, 0, 0
#else
#define SPINLOCK_DEBUG_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 SPINLOCK_DEBUG_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

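/*
 * Usage sketch (editorial example, not part of the original header):
 * a lock is initialized statically with SPIN_LOCK_UNLOCKED or at run
 * time with spin_lock_init().  The function name below is hypothetical.
 */
static inline void example_init(spinlock_t *lp)
{
	spin_lock_init(lp);	/* lock starts out free */
	/* spin_is_locked(lp) now reads 0 until someone takes the lock */
}
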
#if SPINLOCK_DEBUG

extern void _spin_lock(spinlock_t *lock);
extern void _spin_unlock(spinlock_t *lock);
extern int spin_trylock(spinlock_t *lock);
extern unsigned long __spin_trylock(volatile unsigned long *lock);

#define spin_lock(lp)			_spin_lock(lp)
#define spin_unlock(lp)			_spin_unlock(lp)

#else /* ! SPINLOCK_DEBUG */

static inline void spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * Spin with plain loads (lwzx) at low hardware-thread priority
	 * until the lock looks free, then attempt the atomic acquire
	 * with a lwarx/stwcx. reservation pair.  PPC405_ERR77 is a
	 * workaround for a PPC405 erratum; the trailing isync keeps
	 * the critical section from executing before the lock is held.
	 */
	__asm__ __volatile__(
	"b	1f		# spin_lock\n\
2:"	HMT_PRIO_LOW
"	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	2b\n"
	HMT_PRIO_MED
"1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock), "r"(1)
	: "cr0", "memory");
}

static inline void spin_unlock(spinlock_t *lock)
{
	/* eieio orders the critical section's stores before the release */
	__asm__ __volatile__("eieio		# spin_unlock": : :"memory");
	lock->lock = 0;
}

/* Non-blocking acquire: evaluates to nonzero iff the lock was taken. */
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))

#endif
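
/*
 * Usage sketch (editorial example, not part of the original header):
 * a counter protected by a spinlock, with blocking and non-blocking
 * acquisition.  All names here are hypothetical.
 */
static inline void example_locked_inc(spinlock_t *lp, unsigned long *ctr)
{
	spin_lock(lp);			/* spins until acquired */
	(*ctr)++;
	spin_unlock(lp);
}

static inline int example_try_inc(spinlock_t *lp, unsigned long *ctr)
{
	if (!spin_trylock(lp))		/* nonzero iff we got the lock */
		return 0;
	(*ctr)++;
	spin_unlock(lp);
	return 1;
}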

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.  The lock word counts the readers
 * (> 0) and is -1 while a writer holds the lock.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.  (See the usage sketch after the
 * implementations below.)
 */
typedef struct {
	volatile unsigned long lock;
#if SPINLOCK_DEBUG
	volatile unsigned long owner_pc;	/* where the writer took the lock */
#endif
} rwlock_t;

#if SPINLOCK_DEBUG
#define RWLOCK_DEBUG_INIT     , 0
#else
#define RWLOCK_DEBUG_INIT     /* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

#if SPINLOCK_DEBUG

extern void _read_lock(rwlock_t *rw);
extern void _read_unlock(rwlock_t *rw);
extern void _write_lock(rwlock_t *rw);
extern void _write_unlock(rwlock_t *rw);

#define read_lock(rw)		_read_lock(rw)
#define write_lock(rw)		_write_lock(rw)
#define write_unlock(rw)	_write_unlock(rw)
#define read_unlock(rw)		_read_unlock(rw)

#else /* ! SPINLOCK_DEBUG */

static __inline__ void read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	/*
	 * Spin with plain loads at low hardware-thread priority while
	 * a writer (lock < 0) holds the lock, then atomically bump the
	 * reader count; back off and spin again if the incremented
	 * value is not positive.
	 */
	__asm__ __volatile__(
	"b	2f		# read_lock\n\
1:"	HMT_PRIO_LOW
"	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	blt+	1b\n"
	HMT_PRIO_MED
"2:	lwarx	%0,0,%1\n\
	addic.	%0,%0,1\n\
	ble-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	/* eieio is the release barrier; then atomically drop the count */
	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx	%0,0,%1\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	/*
	 * Spin at low priority until the lock is completely free
	 * (no readers, no writer), then atomically store -1 to
	 * claim it exclusively; isync is the acquire barrier.
	 */
	__asm__ __volatile__(
	"b	2f		# write_lock\n\
1:"	HMT_PRIO_LOW
"	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	1b\n"
	HMT_PRIO_MED
"2:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}

static __inline__ void write_unlock(rwlock_t *rw)
{
	/* eieio orders the critical section's stores before the release */
	__asm__ __volatile__("eieio		# write_unlock": : :"memory");
	rw->lock = 0;
}

#endif
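
/*
 * Usage sketch (editorial example, not part of the original header):
 * any number of readers may hold the lock at once; a writer waits for
 * them to drain and then holds it exclusively.  Per the NOTE above,
 * if readers can run in interrupt context the writer must use an
 * irq-safe variant such as write_lock_irqsave(), which
 * <linux/spinlock.h> builds on top of this header.  All names here
 * are hypothetical.
 */
static __inline__ unsigned long example_read_value(rwlock_t *rw,
						   unsigned long *value)
{
	unsigned long v;

	read_lock(rw);		/* shared: concurrent readers are fine */
	v = *value;
	read_unlock(rw);
	return v;
}

static __inline__ void example_write_value(rwlock_t *rw,
					   unsigned long *value,
					   unsigned long v)
{
	write_lock(rw);		/* exclusive: blocks out all readers */
	*value = v;
	write_unlock(rw);
}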

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */