/*
 * NOTE(extraction artifact): the two lines below were a gitweb page
 * header, not part of the original file.
 * Commit subject: "added a lot of printk output to ease writing of emulator"
 * Tree: linux-2.4.21-pre4.git, path: include/asm-ia64/spinlock.h
 */
1 #ifndef _ASM_IA64_SPINLOCK_H
2 #define _ASM_IA64_SPINLOCK_H
3
4 /*
5  * Copyright (C) 1998-2001 Hewlett-Packard Co
6  *      David Mosberger-Tang <davidm@hpl.hp.com>
7  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
8  *
9  * This file is used for SMP configurations only.
10  */
11
12 #include <linux/kernel.h>
13
14 #include <asm/system.h>
15 #include <asm/bitops.h>
16 #include <asm/atomic.h>
17
18 #undef NEW_LOCK
19
20 #ifdef NEW_LOCK
21
/*
 * NEW_LOCK variant of the spinlock type: a single 4-byte word.
 * 0 means free, non-zero means held.  "volatile" forces the compiler
 * to re-read the word on every access (spin_unlock_wait below
 * depends on that).
 */
22 typedef struct {
23         volatile unsigned int lock;
24 } spinlock_t;
25
/* Compound-literal initializer: the lock starts out free. */
26 #define SPIN_LOCK_UNLOCKED                      (spinlock_t) { 0 }
/* Runtime (re)initialization: mark the lock free. */
27 #define spin_lock_init(x)                       ((x)->lock = 0)
28
29 /*
 30  * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 31  * rather than a simple xchg to avoid writing the cache-line when
 32  * there is contention.
 *
 * Fast path: cmpxchg4.acq compares the lock word against ar.ccv
 * (zeroed via r0, the "free" value) and, if equal, stores 1 with
 * acquire semantics; r30 receives the old value.  A non-zero old
 * value means the lock was held, so p15 steers a branch-and-link to
 * the out-of-line slow path ia64_spinlock_contention (defined
 * elsewhere -- presumably it spins and returns owning the lock;
 * confirm against the arch/ia64 implementation).  The lock address is
 * pinned in r31 so the slow path can find it without a normal calling
 * convention; that is also why b7, ar.pfs and the scratch registers
 * r28-r30 appear in the clobber list.  The trailing "1:" label only
 * forces the call's return point into a fresh bundle.
 33  */
34 #define spin_lock(x)                                                                    \
35 {                                                                                       \
36         register char *addr __asm__ ("r31") = (char *) &(x)->lock;                      \
37                                                                                         \
38         __asm__ __volatile__ (                                                          \
39                 "mov r30=1\n"                                                           \
40                 "mov ar.ccv=r0\n"                                                       \
41                 ";;\n"                                                                  \
42                 "cmpxchg4.acq r30=[%0],r30,ar.ccv\n"                                    \
43                 ";;\n"                                                                  \
44                 "cmp.ne p15,p0=r30,r0\n"                                                \
45                 "(p15) br.call.spnt.few b7=ia64_spinlock_contention\n"                  \
46                 ";;\n"                                                                  \
47                 "1:\n"                          /* force a new bundle */                \
48                 :: "r"(addr)                                                            \
49                 : "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory");      \
50 }
51
/*
 * One acquire-cmpxchg attempt, no spinning.  "result" receives the
 * old lock value: 0 means the swap happened and we now own the lock,
 * so the statement expression yields true exactly on success.
 */
52 #define spin_trylock(x)                                                                 \
53 ({                                                                                      \
54         register long result;                                                           \
55                                                                                         \
56         __asm__ __volatile__ (                                                          \
57                 "mov ar.ccv=r0\n"                                                       \
58                 ";;\n"                                                                  \
59                 "cmpxchg4.acq %0=[%2],%1,ar.ccv\n"                                      \
60                 : "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory");         \
61         (result == 0);                                                                  \
62 })
63
/* A non-zero word means some CPU holds the lock. */
64 #define spin_is_locked(x)       ((x)->lock != 0)
/*
 * Release: compiler barrier, then a plain store of 0 through the
 * volatile lock field.  NOTE(review): release *memory* ordering here
 * depends on how the compiler emits the volatile store (st4.rel vs
 * plain st4) -- confirm against the toolchain assumptions of this era.
 */
65 #define spin_unlock(x)          do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
/* Busy-wait (read-only) until the current holder releases the lock. */
66 #define spin_unlock_wait(x)     do { barrier(); } while ((x)->lock)
67
68 #else /* !NEW_LOCK */
69
/*
 * Active (non-NEW_LOCK) spinlock type: a single 4-byte word.
 * 0 means free, non-zero means held.  "volatile" forces a re-read on
 * every access (spin_unlock_wait below depends on that).
 */
70 typedef struct {
71         volatile unsigned int lock;
72 } spinlock_t;
73
/* Compound-literal initializer: the lock starts out free. */
74 #define SPIN_LOCK_UNLOCKED                      (spinlock_t) { 0 }
/* Runtime (re)initialization: mark the lock free. */
75 #define spin_lock_init(x)                       ((x)->lock = 0)
76
77 /*
 78  * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 79  * rather than a simple xchg to avoid writing the cache-line when
 80  * there is contention.
 *
 * Loop at 1: a plain ld4 spins read-only while the word is non-zero
 * (no cache-line ownership traffic); once it reads 0, cmpxchg4.acq
 * tries to install 1 (r29) with acquire semantics.  If another CPU
 * won the race (old value r2 != 0) we fall back to the read-only
 * spin.  The predicates look inverted: "cmp4.eq p0,p7" writes the
 * equality result into p0 (hardwired true, i.e. discarded) and its
 * complement into p7, so "(p7) br" means "branch while locked / on
 * cmpxchg failure".
 81  */
82 #define spin_lock(x) __asm__ __volatile__ (                     \
83         "mov ar.ccv = r0\n"                                     \
84         "mov r29 = 1\n"                                         \
85         ";;\n"                                                  \
86         "1:\n"                                                  \
87         "ld4 r2 = [%0]\n"                                       \
88         ";;\n"                                                  \
89         "cmp4.eq p0,p7 = r0,r2\n"                               \
90         "(p7) br.cond.spnt.few 1b \n"                           \
91         "cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"                 \
92         ";;\n"                                                  \
93         "cmp4.eq p0,p7 = r0, r2\n"                              \
94         "(p7) br.cond.spnt.few 1b\n"                            \
95         ";;\n"                                                  \
96         :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
97
/* A non-zero word means some CPU holds the lock. */
98 #define spin_is_locked(x)       ((x)->lock != 0)
/*
 * Release: compiler barrier, then a plain store of 0 through the
 * volatile lock field.  NOTE(review): release *memory* ordering
 * depends on the compiler emitting st4.rel for the volatile store --
 * confirm against the toolchain assumptions of this era.
 */
99 #define spin_unlock(x)          do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
/*
 * Single acquire-cmpxchg attempt via the cmpxchg_acq helper (from
 * <asm/system.h>, included above): returns the old value, so == 0
 * means we took the lock.
 */
100 #define spin_trylock(x)         (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
/* Busy-wait (read-only) until the current holder releases the lock. */
101 #define spin_unlock_wait(x)     do { barrier(); } while ((x)->lock)
102
103 #endif /* !NEW_LOCK */
104
/*
 * Reader-writer lock packed into one 4-byte word:
 *   bits 0..30  - read_counter: number of active readers
 *   bit  31     - write_lock:   set while a writer owns the lock
 * The asm in read_lock/write_lock below operates on the word as a
 * whole (fetchadd/cmpxchg) and tests bit 31, so this layout is load-
 * bearing; do not reorder the fields.
 */
105 typedef struct {
106         volatile int read_counter:31;
107         volatile int write_lock:1;
108 } rwlock_t;
/* Both fields zero: no readers, no writer. */
109 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
110
/* Runtime (re)initialization via struct assignment. */
111 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
112
/*
 * Acquire for reading, optimistically: fetchadd4.acq bumps the reader
 * count first and only then checks bit 31 (the writer flag) in the
 * value it fetched.  If a writer held the lock we jump to the
 * out-of-line path (placed in the cold ".section .text.lock"):
 * undo the increment with fetchadd4.rel, then spin with ld4.acq until
 * bit 31 clears, and retry the whole sequence from 1:.  "=&r" marks
 * tmp earlyclobber since it is written before the last input use.
 */
113 #define read_lock(rw)                                                           \
114 do {                                                                            \
115         int tmp = 0;                                                            \
116         __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"                \
117                               ";;\n"                                            \
118                               "tbit.nz p6,p0 = %0, 31\n"                        \
119                               "(p6) br.cond.sptk.few 2f\n"                      \
120                               ".section .text.lock,\"ax\"\n"                    \
121                               "2:\tfetchadd4.rel %0 = [%1], -1\n"               \
122                               ";;\n"                                            \
123                               "3:\tld4.acq %0 = [%1]\n"                         \
124                               ";;\n"                                            \
125                               "tbit.nz p6,p0 = %0, 31\n"                        \
126                               "(p6) br.cond.sptk.few 3b\n"                      \
127                               "br.cond.sptk.few 1b\n"                           \
128                               ";;\n"                                            \
129                               ".previous\n"                                     \
130                               : "=&r" (tmp)                                     \
131                               : "r" (rw) : "p6", "memory");                     \
132 } while(0)
133
/*
 * Drop a reader: atomically decrement the reader count with release
 * semantics (fetchadd4.rel), so prior reads in the critical section
 * complete before the count drops.  The fetched old value lands in
 * tmp and is discarded.
 */
134 #define read_unlock(rw)                                                         \
135 do {                                                                            \
136         int tmp = 0;                                                            \
137         __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"                   \
138                               : "=r" (tmp)                                      \
139                               : "r" (rw)                                        \
140                               : "memory");                                      \
141 } while(0)
142
/*
 * Acquire for writing.  "dep r29 = -1, r0, 31, 1" deposits a 1 into
 * bit 31 of zero, i.e. r29 = 0x80000000 (the writer flag).  The loop
 * at 1: spins read-only until the whole word is 0 (no readers AND no
 * writer), then cmpxchg4.acq tries to swap the writer flag in; if the
 * word changed under us (old value r2 != 0) we go back to spinning.
 * As in spin_lock, cmp4.eq writes the complement into p7, so
 * "(p7) br" branches on inequality.
 */
143 #define write_lock(rw)                                                          \
144 do {                                                                            \
145         __asm__ __volatile__ (                                                  \
146                 "mov ar.ccv = r0\n"                                             \
147                 "dep r29 = -1, r0, 31, 1\n"                                     \
148                 ";;\n"                                                          \
149                 "1:\n"                                                          \
150                 "ld4 r2 = [%0]\n"                                               \
151                 ";;\n"                                                          \
152                 "cmp4.eq p0,p7 = r0,r2\n"                                       \
153                 "(p7) br.cond.spnt.few 1b \n"                                   \
154                 "cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"                         \
155                 ";;\n"                                                          \
156                 "cmp4.eq p0,p7 = r0, r2\n"                                      \
157                 "(p7) br.cond.spnt.few 1b\n"                                    \
158                 ";;\n"                                                          \
159                 :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
160 } while(0)
161
/*
 * Release the writer: atomically clear bit 31 via clear_bit (from
 * <asm/bitops.h>), with smp_mb__before_clear_bit() ordering the
 * critical section's accesses before the flag drop.
 * NOTE(review): the barrier is needed because clear_bit alone is
 * presumably not guaranteed to have release semantics here -- confirm
 * against the ia64 bitops implementation.
 */
162 #define write_unlock(x)                                                                 \
163 ({                                                                                      \
164         smp_mb__before_clear_bit();     /* need barrier before releasing lock... */     \
165         clear_bit(31, (x));                                                             \
166 })
167
168 #endif /*  _ASM_IA64_SPINLOCK_H */