#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/memory.h>
#include <asm/hvcall.h>
/*
 * The following define selects basic or shared-processor locking when
 * running on an RPA platform.  As we do more performance tuning, I
 * expect this selection mechanism to change.  Dave E.
 */
/* #define SPLPAR_LOCKS */
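/*
 * Each of spin_lock(), read_lock() and write_lock() below comes in three
 * flavours: an iSeries version (CONFIG_PPC_ISERIES) that yields to the
 * lock holder through a hypervisor call, a shared-processor version
 * (SPLPAR_LOCKS) that confers its remaining cycles to the holder via
 * H_CONFER, and a plain spinning version.
 */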
typedef struct {
	volatile unsigned long lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
static __inline__ int spin_trylock(spinlock_t *lock)
"1:	ldarx	%0,0,%1		# spin_trylock\n\
/*
 * Spin lock value encoding:
 *   0        : Unlocked
 *   Negative : Locked.  Value is paca pointer (0xc...0) of holder
 */
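/*
 * For orientation, a minimal C sketch of the acquire loop that the
 * ldarx/stdcx. sequences below implement, written with GCC's __atomic
 * builtins.  This is an illustrative assumption only, not how the kernel
 * builds these locks: the real assembly stores the holder's paca pointer
 * (not 1) and, on shared-processor systems, gives up its cycles to the
 * holder instead of spinning.
 */
#if 0	/* illustrative sketch, never compiled */
static __inline__ void spin_lock_sketch(spinlock_t *lock)
{
	unsigned long expected;

	for (;;) {
		/* wait (read-only) until the lock word looks free */
		while (__atomic_load_n(&lock->lock, __ATOMIC_RELAXED) != 0)
			;
		expected = 0;
		/* try to atomically change 0 -> held; retry if we lost the race */
		if (__atomic_compare_exchange_n(&lock->lock, &expected, 1UL,
						0, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;
	}
}
#endif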
#ifdef CONFIG_PPC_ISERIES
static __inline__ void spin_lock(spinlock_t *lock)
	unsigned long tmp, tmp2;

"	ldx	%0,0,%2		# load the lock value\n\
	cmpdi	0,%0,0		# if not locked, try to acquire\n\
	lwz	5,0x280(%0)	# load yield counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	li	3,0x25		# yield hcall 0x8-12\n\
	rotrdi	3,3,1		# put the bits in the right spot\n\
	lhz	4,0x18(%0)	# processor number\n\
	sldi	4,4,32		# move into top half of word\n\
	or	5,5,4		# r5 has yield cnt - or it in\n\
	li	4,2		# yield to processor\n\
	li	0,-1		# indicate an hcall\n\
	: "=&r"(tmp), "=&r"(tmp2)
	: "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
static __inline__ void spin_lock(spinlock_t *lock)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"	ldx	%0,0,%2		# load the lock value\n\
	cmpdi	0,%0,0		# if not locked, try to acquire\n\
	lwz	5,0x280(%0)	# load dispatch counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	li	3,0xE4		# give up the cycles H_CONFER\n\
	lhz	4,0x18(%0)	# processor number\n\
	# r5 has dispatch cnt already\n"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
static __inline__ void spin_lock(spinlock_t *lock)
	__asm__ __volatile__(
"	ldx	%0,0,%1		# load the lock value\n\
	cmpdi	0,%0,0		# if not locked, try to acquire\n\
static __inline__ void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}
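/*
 * Typical usage, as a hedged example (my_lock/my_count are made-up names;
 * the irq-safe variants such as spin_lock_irqsave() are provided by the
 * generic <linux/spinlock.h> layer on top of these primitives):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static unsigned long my_count;
 *
 *	spin_lock(&my_lock);
 *	my_count++;
 *	spin_unlock(&my_lock);
 */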
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * Lock value encoding:
 *   0        : Unlocked
 *   Positive : Reader count
 *   Negative : Writer locked. Value is paca pointer (0xc...0) of holder
 *
 * If lock is not held, try to acquire.
 * If lock is held by a writer, yield cycles to the holder.
 * If lock is held by reader(s), spin.
 */
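/*
 * A hedged example of the "mixed" usage described above (names are
 * illustrative): readers that run in interrupt context may take the plain
 * read lock, but the process-context writer must use an irq-safe variant
 * so an interrupt reader cannot deadlock against a writer on its own cpu:
 *
 *	read_lock(&my_rwlock);			... in the interrupt handler
 *	read_unlock(&my_rwlock);
 *
 *	write_lock_irqsave(&my_rwlock, flags);	... in process context
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */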
typedef struct {
	volatile signed long lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
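/*
 * A minimal C sketch of the read-acquire logic for this encoding (an
 * illustrative assumption, not the code below): atomically bump the reader
 * count, but only while the value is non-negative, i.e. while no writer
 * holds the lock.  The real read_lock additionally yields cycles to a
 * holding writer, as described in the comment above.
 */
#if 0	/* illustrative sketch, never compiled */
static __inline__ void read_lock_sketch(rwlock_t *rw)
{
	signed long old;

	for (;;) {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
		if (old < 0)
			continue;	/* writer holds the lock: keep waiting */
		/* increment the reader count unless the word changed under us */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
						0, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;
	}
}
#endif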
static __inline__ int read_trylock(rwlock_t *rw)
	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# read_trylock\n\
2:"	: "=&r"(tmp), "=&r"(ret)
#ifdef CONFIG_PPC_ISERIES
static __inline__ void read_lock(rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
	lwz	5,0x280(%0)	# load yield counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	li	3,0x25		# yield hcall 0x8-12\n\
	rotrdi	3,3,1		# put the bits in the right spot\n\
	lhz	4,0x18(%0)	# processor number\n\
	sldi	4,4,32		# move into top half of word\n\
	or	5,5,4		# r5 has yield cnt - or it in\n\
	li	4,2		# yield to processor\n\
	li	0,-1		# indicate an hcall\n\
	sc			# do the hcall\n\
	: "=&r"(tmp), "=&r"(tmp2)
	: "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
static __inline__ void read_lock(rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
	lwz	5,0x280(%0)	# load dispatch counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	li	3,0xE4		# give up the cycles H_CONFER\n\
	lhz	4,0x18(%0)	# processor number\n\
	# r5 has dispatch cnt already\n"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
static __inline__ void read_lock(rwlock_t *rw)
	__asm__ __volatile__(
static __inline__ void read_unlock(rwlock_t *rw)
	__asm__ __volatile__(
	"eieio			# read_unlock\n\
static __inline__ int write_trylock(rwlock_t *rw)
	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# write_trylock\n\
2:"	: "=&r"(tmp), "=&r"(ret)
#ifdef CONFIG_PPC_ISERIES
static __inline__ void write_lock(rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"	ldx	%0,0,%2		# load the lock value\n\
	cmpdi	0,%0,0		# if not locked(0), try to acquire\n\
	bgt	1b		# negative(0xc..)->cycles to holder\n"
"3:	lwz	5,0x280(%0)	# load yield counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	lhz	4,0x18(%0)	# processor number\n\
	sldi	4,4,32		# move into top half of word\n\
	or	5,5,4		# r5 has yield cnt - or it in\n\
	li	3,0x25		# yield hcall 0x8-12\n\
	rotrdi	3,3,1		# put the bits in the right spot\n\
	li	4,2		# yield to processor\n\
	li	0,-1		# indicate an hcall\n\
	sc			# do the hcall\n\
	: "=&r"(tmp), "=&r"(tmp2)
	: "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
static __inline__ void write_lock(rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"	ldx	%0,0,%2		# load the lock value\n\
	li	3,0xE4		# give up the cycles H_CONFER\n\
	cmpdi	0,%0,0		# if not locked(0), try to acquire\n\
	blt	3f		# negative(0xc..)->confer to holder\n\
"3:	lwz	5,0x280(%0)	# load dispatch counter\n\
	andi.	%1,5,1		# if even then spin\n\
	lwsync			# if odd, give up cycles\n\
	ldx	%1,0,%2		# reverify the lock holder\n\
	bne	1b		# new holder so restart\n\
	lhz	4,0x18(%0)	# processor number\n\
	# r5 has dispatch cnt already\n"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
static __inline__ void write_lock(rwlock_t *rw)
	__asm__ __volatile__(
"	ldx	%0,0,%1		# load the lock value\n\
	cmpdi	0,%0,0		# if not locked(0), try to acquire\n\
static __inline__ void write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
	rw->lock = 0;
}
static __inline__ int is_read_locked(rwlock_t *rw)

static __inline__ int is_write_locked(rwlock_t *rw)
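/*
 * Given the lock value encoding above (positive = reader count, negative =
 * writer's paca pointer), these predicates amount to sign tests on
 * rw->lock (stated here as an assumption, not taken from the bodies):
 *
 *	is_read_locked(rw)  : rw->lock > 0
 *	is_write_locked(rw) : rw->lock < 0
 */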
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */