1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
5 * Simple spin lock operations.
7 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
8 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
10 * Type of int is used as a full 64b word is not necessary.
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
/* Lock word of spinlock_t: 0 = unlocked, non-zero = held.
 * NOTE(review): the typedef/struct wrapper lines are elided in this copy. */
18 volatile unsigned int lock;
/* Static initializer for a spinlock in the unlocked state. */
22 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
/* Non-zero iff the lock is currently held; no memory barrier implied. */
24 #define spin_is_locked(x) ((x)->lock != 0)
/* Single attempt to take the lock using lwarx (load-and-reserve) on the
 * lock word.  NOTE(review): the rest of the asm (conditional store,
 * branches, result setup) and the return statement are elided in this
 * copy -- presumably returns non-zero on success; confirm in full file. */
26 static __inline__ int spin_trylock(spinlock_t *lock)
31 "1: lwarx %0,0,%1 # spin_trylock\n\
/* Acquire the lock, spinning until it is free.  While waiting, the asm
 * drops the hardware thread to low SMT priority ("or 1,1,1") and returns
 * to medium priority ("or 2,2,2") before retrying -- PowerPC priority
 * nops, useful on SMT CPUs.  The held value stored is 1 (operand "r"(1)).
 * NOTE(review): several interior asm lines are elided in this copy. */
46 static __inline__ void spin_lock(spinlock_t *lock)
52 1: or 1,1,1 # spin at low priority\n\
56 or 2,2,2 # back to medium priority\n\
64 : "r"(&lock->lock), "r"(1)
/* Release the lock.  lwsync is a lightweight barrier ordering all prior
 * stores before the release; the store that actually clears ->lock is on
 * a line elided in this copy -- confirm against the full file. */
68 static __inline__ void spin_unlock(spinlock_t *lock)
70 __asm__ __volatile__("lwsync # spin_unlock": : :"memory");
75 * Read-write spinlocks, allowing multiple readers
76 * but only one writer.
78 * NOTE! it is quite common to have readers in interrupts
79 * but no interrupt writers. For those circumstances we
80 * can "mix" irq-safe locks - any writer needs to get a
81 * irq-safe write-lock, but readers can get non-irqsafe
/* Lock word of rwlock_t: signed so a writer can hold it at -1 (see the
 * "r"(-1) operands in the write paths later in this file); positive
 * values presumably count active readers -- TODO confirm in full file.
 * NOTE(review): the typedef/struct wrapper lines are elided in this copy. */
85 volatile signed int lock;
/* Static initializer for an rwlock with no readers and no writer. */
88 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
/* Single attempt to take the lock for reading, via lwarx on the lock
 * word.  NOTE(review): the asm between the lwarx and the "2:" exit label
 * (the writer check and conditional store) plus the return statement are
 * elided in this copy; presumably fails when a writer holds -1. */
90 static __inline__ int read_trylock(rwlock_t *rw)
96 "1: lwarx %0,0,%2 # read_trylock\n\
105 2:" : "=&r"(tmp), "=&r"(ret)
/* Acquire the lock for reading, spinning while a writer holds it.  The
 * spin loop lowers SMT thread priority ("or 1,1,1") while waiting and
 * restores medium priority ("or 2,2,2") before retrying.
 * NOTE(review): the lwarx/stwcx. body lines are elided in this copy. */
112 static __inline__ void read_lock(rwlock_t *rw)
116 __asm__ __volatile__(
118 1: or 1,1,1 # spin at low priority\n\
122 or 2,2,2 # back to medium priority\n\
/* Drop a read hold.  lwsync orders the critical section's loads/stores
 * before the release; the decrement of ->lock itself is on lines elided
 * in this copy -- confirm against the full file. */
135 static __inline__ void read_unlock(rwlock_t *rw)
139 __asm__ __volatile__(
140 "lwsync # read_unlock\n\
/* Single attempt to take the lock for writing: lwarx the lock word and,
 * if free, store the writer value -1 (operand "r"(-1)).
 * NOTE(review): the asm between the lwarx and the "2:" exit label and
 * the return statement are elided in this copy; presumably fails when
 * any reader or writer holds the lock. */
150 static __inline__ int write_trylock(rwlock_t *rw)
155 __asm__ __volatile__(
156 "1: lwarx %0,0,%2 # write_trylock\n\
164 2:" : "=&r"(tmp), "=&r"(ret)
165 : "r"(&rw->lock), "r"(-1)
/* Acquire the lock for writing, spinning until no readers or writer
 * remain, then storing the writer value -1 (operand "r"(-1)).  The entry
 * branches straight to the attempt ("b 2f"); the spin loop lowers SMT
 * thread priority ("or 1,1,1") while waiting and restores medium
 * priority ("or 2,2,2") before retrying.
 * NOTE(review): the lwarx/stwcx. body lines are elided in this copy. */
171 static __inline__ void write_lock(rwlock_t *rw)
175 __asm__ __volatile__(
176 "b 2f # write_lock\n\
177 1: or 1,1,1 # spin at low priority\n\
181 or 2,2,2 # back to medium priority\n\
189 : "r"(&rw->lock), "r"(-1)
/* Release the write hold.  lwsync orders the critical section's stores
 * before the release; the store that clears ->lock is on a line elided
 * in this copy -- confirm against the full file. */
193 static __inline__ void write_unlock(rwlock_t *rw)
195 __asm__ __volatile__("lwsync # write_unlock": : :"memory");
/* Bodies of both predicates are fully elided in this copy.  Given the
 * signed lock word (-1 = writer, positive = readers), they presumably
 * test rw->lock > 0 and rw->lock < 0 respectively -- TODO confirm
 * against the full file. */
199 static __inline__ int is_read_locked(rwlock_t *rw)
204 static __inline__ int is_write_locked(rwlock_t *rw)
/* Re-initialize a spinlock to the unlocked state. */
209 #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
/* Busy-wait (with a compiler barrier each iteration) until the lock is
 * observed free; does NOT acquire it. */
210 #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
/* Re-initialize an rwlock to the unlocked state (no readers, no writer). */
212 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
214 #endif /* __KERNEL__ */
215 #endif /* __ASM_SPINLOCK_H */