[linux-2.4.21-pre4.git] include/asm-ppc64/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * An unsigned int is used for the lock word; a full 64-bit word
 * is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
typedef struct {
        volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_is_locked(x)       ((x)->lock != 0)
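
/*
 * Added commentary on the primitives below: lwarx loads a word and
 * establishes a reservation on its address; the matching stwcx.
 * succeeds only if that reservation is still intact, setting cr0 so
 * the bne- can retry, which makes each read-modify-write atomic.
 * The isync on the success path keeps the critical section from
 * being executed speculatively before the lock is seen to be held.
 */
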
static __inline__ int spin_trylock(spinlock_t *lock)
{
        unsigned int tmp;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1         # spin_trylock\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(tmp)
        : "r"(&lock->lock)
        : "cr0", "memory");

        return tmp;
}
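
/*
 * Illustrative sketch (hypothetical lock name, not from this header):
 * spin_trylock returns 1 on success and 0 if the lock was held, so a
 * caller that must not spin can do:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	}
 */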

static __inline__ void spin_lock(spinlock_t *lock)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # spin_lock\n\
1:      or              1,1,1           # spin at low priority\n\
        lwzx            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n\
        or              2,2,2           # back to medium priority\n\
2:      lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        stwcx.          %2,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&lock->lock), "r"(1)
        : "cr0", "memory");
}
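
/*
 * Added commentary: spin_lock spins in two phases.  While the lock
 * looks held it polls with a plain lwzx at low SMT thread priority
 * (the "or 1,1,1" hint), avoiding needless reservation traffic on
 * the lock line; once the lock appears free it restores medium
 * priority ("or 2,2,2") and races to claim it with lwarx/stwcx.
 */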

static __inline__ void spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
        lock->lock = 0;
}
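
/*
 * Illustrative sketch (hypothetical names): the usual pairing.  The
 * lwsync in spin_unlock orders the critical-section accesses before
 * the store that releases the lock:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static int my_count;
 *
 *	spin_lock(&my_lock);
 *	my_count++;
 *	spin_unlock(&my_lock);
 */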

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.  (A sketch of this mixing follows the
 * declarations below.)
 */
typedef struct {
        volatile signed int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
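
/*
 * Added commentary: the lock word encodes 0 = free, a positive value
 * = the number of active readers, and -1 = held by a writer (compare
 * is_read_locked/is_write_locked below).
 *
 * Illustrative sketch of the irq mixing described above (hypothetical
 * names; the irq-safe wrappers come from linux/spinlock.h, not from
 * this file):
 *
 *	write_lock_irqsave(&my_rwlock, flags);	... writer, irqs off
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	read_lock(&my_rwlock);			... reader, irq context ok
 *	read_unlock(&my_rwlock);
 */
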
static __inline__ int read_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # read_trylock\n\
        li              %1,0\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            2f\n\
        stwcx.          %0,0,%2\n\
        bne-            1b\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return ret;
}

static __inline__ void read_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # read_lock\n\
1:      or              1,1,1           # spin at low priority\n\
        lwax            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        blt+            1b\n\
        or              2,2,2           # back to medium priority\n\
2:      lwarx           %0,0,%1\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            1b\n\
        stwcx.          %0,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ void read_unlock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "lwsync                         # read_unlock\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}
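
/*
 * Added commentary: unlike spin_unlock, read_unlock needs a
 * lwarx/stwcx. retry loop because other readers may be adjusting the
 * shared count at the same time; the leading lwsync orders this
 * reader's critical section before the decrement that drops its hold.
 */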

static __inline__ int write_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # write_trylock\n\
        cmpwi           0,%0,0\n\
        li              %1,0\n\
        bne-            2f\n\
        stwcx.          %3,0,%2\n\
        bne-            1b\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");

        return ret;
}

static __inline__ void write_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # write_lock\n\
1:      or              1,1,1           # spin at low priority\n\
        lwax            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n\
        or              2,2,2           # back to medium priority\n\
2:      lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        stwcx.          %2,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");
}

static __inline__ void write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
        rw->lock = 0;
}
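
/*
 * Added commentary: the writer holds the lock exclusively, so a plain
 * store of 0 is enough to release it; as in spin_unlock, the lwsync
 * beforehand is the release barrier.
 */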

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->lock < 0;
}

#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))

#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */