/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
 *                   implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 *
 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
 */
10 #include <linux/rwsem.h>
11 #include <linux/sched.h>
12 #include <linux/module.h>
15 struct list_head list;
16 struct task_struct *task;
18 #define RWSEM_WAITING_FOR_READ 0x00000001
19 #define RWSEM_WAITING_FOR_WRITE 0x00000002
#if RWSEM_DEBUG
/*
 * dump the semaphore's state for debugging
 * - prints the caller's pid, a caller-supplied tag string, the activity
 *   count, and whether any waiters are queued
 * - only emitted when per-semaphore tracing (sem->debug) is enabled
 */
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug)
		printk("[%d] %s({%d,%d})\n",
		       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
}
#endif
32 * initialise the semaphore
34 void init_rwsem(struct rw_semaphore *sem)
37 spin_lock_init(&sem->wait_lock);
38 INIT_LIST_HEAD(&sem->wait_list);
45 * handle the lock being released whilst there are processes blocked on it that can now run
46 * - if we come here, then:
47 * - the 'active count' _reached_ zero
48 * - the 'waiting count' is non-zero
49 * - the spinlock must be held by the caller
50 * - woken process blocks are discarded from the list after having flags zeroised
52 static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
54 struct rwsem_waiter *waiter;
57 rwsemtrace(sem,"Entering __rwsem_do_wake");
59 waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
61 /* try to grant a single write lock if there's a writer at the front of the queue
62 * - we leave the 'waiting count' incremented to signify potential contention
64 if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
66 list_del(&waiter->list);
68 wake_up_process(waiter->task);
72 /* grant an infinite number of read locks to the readers at the front of the queue */
75 list_del(&waiter->list);
77 wake_up_process(waiter->task);
79 if (list_empty(&sem->wait_list))
81 waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
82 } while (waiter->flags&RWSEM_WAITING_FOR_READ);
84 sem->activity += woken;
87 rwsemtrace(sem,"Leaving __rwsem_do_wake");
92 * wake a single writer
94 static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
96 struct rwsem_waiter *waiter;
100 waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
101 list_del(&waiter->list);
104 wake_up_process(waiter->task);
109 * get a read lock on the semaphore
111 void __down_read(struct rw_semaphore *sem)
113 struct rwsem_waiter waiter;
114 struct task_struct *tsk;
116 rwsemtrace(sem,"Entering __down_read");
118 spin_lock(&sem->wait_lock);
120 if (sem->activity>=0 && list_empty(&sem->wait_list)) {
123 spin_unlock(&sem->wait_lock);
128 set_task_state(tsk,TASK_UNINTERRUPTIBLE);
130 /* set up my own style of waitqueue */
132 waiter.flags = RWSEM_WAITING_FOR_READ;
134 list_add_tail(&waiter.list,&sem->wait_list);
136 /* we don't need to touch the semaphore struct anymore */
137 spin_unlock(&sem->wait_lock);
139 /* wait to be given the lock */
144 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
147 tsk->state = TASK_RUNNING;
150 rwsemtrace(sem,"Leaving __down_read");
154 * trylock for reading -- returns 1 if successful, 0 if contention
156 int __down_read_trylock(struct rw_semaphore *sem)
159 rwsemtrace(sem,"Entering __down_read_trylock");
161 spin_lock(&sem->wait_lock);
163 if (sem->activity>=0 && list_empty(&sem->wait_list)) {
169 spin_unlock(&sem->wait_lock);
171 rwsemtrace(sem,"Leaving __down_read_trylock");
176 * get a write lock on the semaphore
177 * - note that we increment the waiting count anyway to indicate an exclusive lock
179 void __down_write(struct rw_semaphore *sem)
181 struct rwsem_waiter waiter;
182 struct task_struct *tsk;
184 rwsemtrace(sem,"Entering __down_write");
186 spin_lock(&sem->wait_lock);
188 if (sem->activity==0 && list_empty(&sem->wait_list)) {
191 spin_unlock(&sem->wait_lock);
196 set_task_state(tsk,TASK_UNINTERRUPTIBLE);
198 /* set up my own style of waitqueue */
200 waiter.flags = RWSEM_WAITING_FOR_WRITE;
202 list_add_tail(&waiter.list,&sem->wait_list);
204 /* we don't need to touch the semaphore struct anymore */
205 spin_unlock(&sem->wait_lock);
207 /* wait to be given the lock */
212 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
215 tsk->state = TASK_RUNNING;
218 rwsemtrace(sem,"Leaving __down_write");
222 * trylock for writing -- returns 1 if successful, 0 if contention
224 int __down_write_trylock(struct rw_semaphore *sem)
227 rwsemtrace(sem,"Entering __down_write_trylock");
229 spin_lock(&sem->wait_lock);
231 if (sem->activity==0 && list_empty(&sem->wait_list)) {
237 spin_unlock(&sem->wait_lock);
239 rwsemtrace(sem,"Leaving __down_write_trylock");
244 * release a read lock on the semaphore
246 void __up_read(struct rw_semaphore *sem)
248 rwsemtrace(sem,"Entering __up_read");
250 spin_lock(&sem->wait_lock);
252 if (--sem->activity==0 && !list_empty(&sem->wait_list))
253 sem = __rwsem_wake_one_writer(sem);
255 spin_unlock(&sem->wait_lock);
257 rwsemtrace(sem,"Leaving __up_read");
261 * release a write lock on the semaphore
263 void __up_write(struct rw_semaphore *sem)
265 rwsemtrace(sem,"Entering __up_write");
267 spin_lock(&sem->wait_lock);
270 if (!list_empty(&sem->wait_list))
271 sem = __rwsem_do_wake(sem);
273 spin_unlock(&sem->wait_lock);
275 rwsemtrace(sem,"Leaving __up_write");
/* export the entry points for use by loadable modules */
EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(rwsemtrace);