linux-2.4.git: include/linux/brlock.h
#ifndef __LINUX_BRLOCK_H
#define __LINUX_BRLOCK_H

/*
 * 'Big Reader' read-write spinlocks.
 *
 * Super-fast read/write locks, with a write-side penalty. The point
 * is to have a per-CPU read/write lock: readers lock their CPU-local
 * read lock, writers must lock all CPUs' locks to get write access.
 * These CPU-read-write locks are semantically identical to normal
 * rwlocks. Memory usage is higher as well (NR_CPUS*L1_CACHE_BYTES
 * bytes).
 *
 * The most important feature is that these spinlocks do not cause
 * cacheline ping-pong in the 'mostly read-only data' case.
 *
 * Copyright 2000, Ingo Molnar <mingo@redhat.com>
 *
 * Registry idea and naming [ crucial! :-) ] by:
 *
 *                 David S. Miller <davem@redhat.com>
 */

/* Register bigreader lock indices here. */
enum brlock_indices {
	BR_GLOBALIRQ_LOCK,
	BR_NETPROTO_LOCK,

	__BR_END
};
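
/*
 * Usage sketch: how a reader and a writer use a registered index.
 * The two functions below are hypothetical, illustration only; in
 * the 2.4 tree the real BR_NETPROTO_LOCK users live in networking
 * code such as net/core/dev.c.
 */
#if 0	/* illustration only */
static void example_netproto_reader(void)
{
	br_read_lock(BR_NETPROTO_LOCK);
	/* ... walk the protocol lists read-only ... */
	br_read_unlock(BR_NETPROTO_LOCK);
}

static void example_netproto_writer(void)
{
	/* the _bh variant also disables softirqs, see below */
	br_write_lock_bh(BR_NETPROTO_LOCK);
	/* ... modify the protocol lists ... */
	br_write_unlock_bh(BR_NETPROTO_LOCK);
}
#endif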

#include <linux/config.h>

#ifdef CONFIG_SMP

#include <linux/cache.h>
#include <linux/spinlock.h>

typedef unsigned int	brlock_read_lock_t;

/*
 * align the last allocated index to the next cacheline:
 */
#define __BR_IDX_MAX \
	(((sizeof(brlock_read_lock_t)*__BR_END + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) / sizeof(brlock_read_lock_t))
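
/*
 * Worked example: with SMP_CACHE_BYTES == 32 and a 4-byte
 * brlock_read_lock_t, __BR_END == 2 gives 8 bytes of counters,
 * which rounds up to one 32-byte cacheline, i.e. __BR_IDX_MAX == 8.
 * Each CPU's row of __brlock_array thus occupies (a multiple of)
 * its own cacheline, which is what prevents reader-side cacheline
 * ping-pong.
 */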

extern brlock_read_lock_t __brlock_array[NR_CPUS][__BR_IDX_MAX];

struct br_wrlock {
	spinlock_t lock;
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

extern struct br_wrlock __br_write_locks[__BR_IDX_MAX];

extern void __br_lock_usage_bug (void);

static inline void br_read_lock (enum brlock_indices idx)
{
	unsigned int *ctr;
	spinlock_t *lock;

	/*
	 * This causes a link-time bug message if an
	 * invalid index is used:
	 */
	if (idx >= __BR_END)
		__br_lock_usage_bug();

	ctr = &__brlock_array[smp_processor_id()][idx];
	lock = &__br_write_locks[idx].lock;
again:
	(*ctr)++;
	mb();
	if (spin_is_locked(lock)) {
		(*ctr)--;
		wmb(); /*
			* The release of the ctr must become visible
			* to the other CPUs eventually, thus the wmb();
			* we don't care if spin_is_locked is reordered
			* before the release of the ctr.
			* However, IMHO this wmb() is superfluous even
			* in theory.  It would only be needed if doing
			* an ldl_l instead of an ldl on the other CPUs
			* made a difference, and I don't think that is
			* the case.
			* I'd like to clarify this issue further, but
			* for now this is a slow path, so adding the
			* wmb() keeps us on the safe side.
			*/
		while (spin_is_locked(lock))
			barrier();
		goto again;
	}
}
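
/*
 * Note on the retry protocol above: a reader advertises itself by
 * incrementing its per-CPU counter *before* checking the writer's
 * spinlock.  If a writer already holds the lock, the reader backs
 * off (decrements) and spins until the lock is released; if the
 * reader's increment becomes visible before the writer's check of
 * the counters, the writer waits for the counter to drain.  Either
 * way, reader and writer never both believe they hold the lock.
 */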

static inline void br_read_unlock (enum brlock_indices idx)
{
	unsigned int *ctr;

	if (idx >= __BR_END)
		__br_lock_usage_bug();

	ctr = &__brlock_array[smp_processor_id()][idx];

	wmb();
	(*ctr)--;
}

/* write path not inlined - it's rare and larger */

extern void FASTCALL(__br_write_lock (enum brlock_indices idx));
extern void FASTCALL(__br_write_unlock (enum brlock_indices idx));
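
/*
 * For reference, a sketch of how the out-of-line write side (in
 * lib/brlock.c, not shown here) pairs with the reader protocol
 * above -- this assumes the plain spinlock variant and is only an
 * approximation of the real code:
 *
 *	void __br_write_lock (enum brlock_indices idx)
 *	{
 *		int i;
 *	again:
 *		spin_lock(&__br_write_locks[idx].lock);
 *		for (i = 0; i < smp_num_cpus; i++)
 *			if (__brlock_array[cpu_logical_map(i)][idx] != 0) {
 *				spin_unlock(&__br_write_locks[idx].lock);
 *				barrier();
 *				goto again;
 *			}
 *	}
 *
 * i.e. the writer takes the per-index spinlock and then waits for
 * every CPU's read counter to drain to zero before proceeding.
 */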

static inline void br_write_lock (enum brlock_indices idx)
{
	if (idx >= __BR_END)
		__br_lock_usage_bug();
	__br_write_lock(idx);
}

static inline void br_write_unlock (enum brlock_indices idx)
{
	if (idx >= __BR_END)
		__br_lock_usage_bug();
	__br_write_unlock(idx);
}

#else
# define br_read_lock(idx)	((void)(idx))
# define br_read_unlock(idx)	((void)(idx))
# define br_write_lock(idx)	((void)(idx))
# define br_write_unlock(idx)	((void)(idx))
#endif

/*
 * Now enumerate all of the possible sw/hw IRQ protected
 * versions of the interfaces.
 */
#define br_read_lock_irqsave(idx, flags) \
	do { local_irq_save(flags); br_read_lock(idx); } while (0)

#define br_read_lock_irq(idx) \
	do { local_irq_disable(); br_read_lock(idx); } while (0)

#define br_read_lock_bh(idx) \
	do { local_bh_disable(); br_read_lock(idx); } while (0)

#define br_write_lock_irqsave(idx, flags) \
	do { local_irq_save(flags); br_write_lock(idx); } while (0)

#define br_write_lock_irq(idx) \
	do { local_irq_disable(); br_write_lock(idx); } while (0)

#define br_write_lock_bh(idx) \
	do { local_bh_disable(); br_write_lock(idx); } while (0)

#define br_read_unlock_irqrestore(idx, flags) \
	do { br_read_unlock(idx); local_irq_restore(flags); } while (0)

#define br_read_unlock_irq(idx) \
	do { br_read_unlock(idx); local_irq_enable(); } while (0)

#define br_read_unlock_bh(idx) \
	do { br_read_unlock(idx); local_bh_enable(); } while (0)

#define br_write_unlock_irqrestore(idx, flags) \
	do { br_write_unlock(idx); local_irq_restore(flags); } while (0)

#define br_write_unlock_irq(idx) \
	do { br_write_unlock(idx); local_irq_enable(); } while (0)

#define br_write_unlock_bh(idx) \
	do { br_write_unlock(idx); local_bh_enable(); } while (0)
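
/*
 * Example (hypothetical caller): taking the write side from code
 * that may also run with hard interrupts enabled:
 *
 *	unsigned long flags;
 *
 *	br_write_lock_irqsave(BR_GLOBALIRQ_LOCK, flags);
 *	... touch the write-protected data ...
 *	br_write_unlock_irqrestore(BR_GLOBALIRQ_LOCK, flags);
 */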

#endif /* __LINUX_BRLOCK_H */