/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 *
 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
struct rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug)
		printk("[%d] %s({%d,%d})\n",
		       current->pid, str, sem->activity,
		       list_empty(&sem->wait_list) ? 0 : 1);
}
#endif
/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
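/*
 * Illustrative usage sketch (compiled out via #if 0): assumes the standard
 * <linux/rwsem.h> down_read()/up_read()/down_write()/up_write() wrappers,
 * which route contended paths to the functions in this file.  shared_sem,
 * shared_data and the example_* functions are hypothetical names; the
 * semaphore must first be set up with init_rwsem(&shared_sem).
 */
#if 0
static struct rw_semaphore shared_sem;
static int shared_data;

static int example_reader(void)
{
	int val;

	down_read(&shared_sem);		/* slow path: __down_read() */
	val = shared_data;		/* any number of readers at once */
	up_read(&shared_sem);		/* slow path: __up_read() */
	return val;
}

static void example_writer(int val)
{
	down_write(&shared_sem);	/* slow path: __down_write() */
	shared_data = val;		/* exclusive access */
	up_write(&shared_sem);		/* slow path: __up_write() */
}
#endif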
/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		waiter->task = NULL;	/* tells the sleeper it holds the lock */
		wake_up_process(tsk);
		free_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue */
	woken = 0;
	do {
		list_del(&waiter->list);
		tsk = waiter->task;
		waiter->task = NULL;
		wake_up_process(tsk);
		free_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}
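/*
 * Worked example (illustrative): with the queue R1, R2, W1, R3 and the
 * active count just back at zero, __rwsem_do_wake() wakes R1 and R2 and
 * leaves activity at +2; W1 and R3 stay queued.  Had W1 been at the head,
 * only W1 would be woken, with activity set to -1.
 */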
/*
 * wake a single writer
 */
static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	waiter->task = NULL;
	wake_up_process(tsk);
	free_task_struct(tsk);
	return sem;
}
/*
 * get a read lock on the semaphore
 */
void fastcall __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)	/* zeroed by the waker on grant */
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_read");
}
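/*
 * Note on the handshake above: the sleeper spins on its on-stack
 * waiter.task rather than on the semaphore itself.  The waker saves the
 * task pointer aside, zeroes waiter.task and only then wakes the task
 * through the saved pointer; once the sleeper observes NULL it may return
 * and pop the waiter off its stack, so the waker never touches the waiter
 * again.  The get_task_struct()/free_task_struct() pair keeps the
 * sleeper's task_struct alive across that window.
 */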
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	rwsemtrace(sem, "Entering __down_read_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_read_trylock");
	return ret;
}
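/*
 * Illustrative trylock sketch (compiled out via #if 0): assumes the
 * <linux/rwsem.h> down_read_trylock() wrapper; example_try_reader is a
 * hypothetical caller.
 */
#if 0
static int example_try_reader(struct rw_semaphore *sem)
{
	if (!down_read_trylock(sem))	/* __down_read_trylock() returned 0 */
		return -EBUSY;		/* writer active or waiters queued */
	/* ... read shared state ... */
	up_read(sem);
	return 0;
}
#endif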
/*
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an
 *   exclusive lock
 */
void fastcall __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)	/* zeroed by the waker on grant */
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}
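/*
 * Note: both trylocks fail not only when the lock is held incompatibly but
 * also whenever wait_list is non-empty, so a trylock can never jump the
 * queue ahead of sleeping waiters.
 */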
/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_read");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_read");
}
/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_write");
}
EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
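/*
 * Summary of the sem->activity encoding used throughout this file:
 *	activity >  0	number of read locks currently held
 *	activity == 0	lock is free
 *	activity == -1	one write lock held
 */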