#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/kernel.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

#undef NEW_LOCK

#ifdef NEW_LOCK

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(x)	((x)->lock = 0)

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.
 */
#define spin_lock(x)								\
{										\
	register char *addr __asm__ ("r31") = (char *) &(x)->lock;		\
										\
	__asm__ __volatile__ (							\
		"mov r30=1\n"							\
		"mov ar.ccv=r0\n"						\
		";;\n"								\
		"cmpxchg4.acq r30=[%0],r30,ar.ccv\n"				\
		";;\n"								\
		"cmp.ne p15,p0=r30,r0\n"					\
		"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n"		\
		";;\n"								\
		"1:\n"				/* force a new bundle */	\
		:: "r"(addr)							\
		: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}

#define spin_trylock(x)								\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv=r0\n"						\
		";;\n"								\
		"cmpxchg4.acq %0=[%2],%1,ar.ccv\n"				\
		: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory");	\
	(result == 0);								\
})

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock(x)		do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)

#else /* !NEW_LOCK */

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(x)	((x)->lock = 0)

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.
 */
#define spin_lock(x) __asm__ __volatile__ (			\
	"mov ar.ccv = r0\n"					\
	"mov r29 = 1\n"						\
	";;\n"							\
	"1:\n"							\
	"ld4 r2 = [%0]\n"					\
	";;\n"							\
	"cmp4.eq p0,p7 = r0,r2\n"				\
	"(p7) br.cond.spnt.few 1b\n"				\
	"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"			\
	";;\n"							\
	"cmp4.eq p0,p7 = r0, r2\n"				\
	"(p7) br.cond.spnt.few 1b\n"				\
	";;\n"							\
	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock(x)		do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)

#endif /* !NEW_LOCK */

typedef struct {
	volatile int read_counter	: 31;
	volatile int write_lock		:  1;
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0 }
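
/*
 * Readers advance the low 31 bits with fetchadd4.acq on the word as a
 * whole; a writer claims bit 31 (the sign bit) with cmpxchg4.acq.  A
 * negative value therefore means "writer active", which is what the
 * tbit tests in read_lock() below check for.
 */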

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)

#define read_lock(rw)							\
do {									\
	int tmp = 0;							\
	__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"	\
			      ";;\n"					\
			      "tbit.nz p6,p0 = %0, 31\n"		\
			      "(p6) br.cond.sptk.few 2f\n"		\
			      ".section .text.lock,\"ax\"\n"		\
			      "2:\tfetchadd4.rel %0 = [%1], -1\n"	\
			      ";;\n"					\
			      "3:\tld4.acq %0 = [%1]\n"			\
			      ";;\n"					\
			      "tbit.nz p6,p0 = %0, 31\n"		\
			      "(p6) br.cond.sptk.few 3b\n"		\
			      "br.cond.sptk.few 1b\n"			\
			      ";;\n"					\
			      ".previous\n"				\
			      : "=&r" (tmp)				\
			      : "r" (rw) : "p6", "memory");		\
} while (0)

#define read_unlock(rw)							\
do {									\
	int tmp = 0;							\
	__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"		\
			      : "=r" (tmp) : "r" (rw) : "memory");	\
} while (0)

#define write_lock(rw)							\
do {									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1\n"				\
		";;\n"							\
		"1:\n"							\
		"ld4 r2 = [%0]\n"					\
		";;\n"							\
		"cmp4.eq p0,p7 = r0,r2\n"				\
		"(p7) br.cond.spnt.few 1b\n"				\
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"			\
		";;\n"							\
		"cmp4.eq p0,p7 = r0, r2\n"				\
		"(p7) br.cond.spnt.few 1b\n"				\
		";;\n"							\
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
} while (0)

#define write_unlock(x)							\
({									\
	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */ \
	clear_bit(31, (x));						\
})

#endif /* _ASM_IA64_SPINLOCK_H */