/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	printk("sem=%p\n",sem);
	printk("(sem)=%08lx\n",sem->count);
	if (sem->debug)
		printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
}
#endif

/*
 * handle the lock being released whilst there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags zeroised
 */
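/* an illustrative layout of the count, assuming the 32-bit i386 bias values
 * (RWSEM_ACTIVE_BIAS 0x00000001, RWSEM_WAITING_BIAS 0xffff0000; other
 * architectures may differ):
 *
 *	0x00000000	lock free, queue empty
 *	0x00000001	one active reader, nobody waiting
 *	0xffff0001	one active writer, nobody waiting
 *	0xffff0001	one active reader, one queued waiter
 *	0xfffe0001	one active writer, one queued waiter
 *
 * each lock holder contributes one ACTIVE_BIAS and each queued waiter one
 * WAITING_BIAS; a writer holds WAITING_BIAS+ACTIVE_BIAS so that reader fast
 * paths see a negative count and fail
 */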
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	rwsemtrace(sem,"Entering __rwsem_do_wake");

	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;
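	/* the transition fails if another task has put the active part up in the
	 * meantime (typically a fast-path acquirer whose attempt is in flight);
	 * the undo path at the end of this function then backs the bias out again
	 */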
	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);

	/* try to grant a single write lock if there's a writer at the front of the queue
	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
	 *   incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	list_del(&waiter->list);
	tsk = waiter->task;
	mb();
	waiter->flags = 0;
	wake_up_process(tsk);
	free_task_struct(tsk);
	goto out;

	/* grant an infinite number of read locks to the readers at the front of the queue
	 * - note we increment the 'active part' of the count by the number of readers (less one
	 *   for the activity decrement we've already done) before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next==&sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
	woken -= RWSEM_ACTIVE_BIAS;
	rwsem_atomic_add(woken,sem);
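	/* a worked example, again assuming the i386 bias values: waking N readers
	 * must convert N waiters into N active readers, so for each one we add
	 * ACTIVE_BIAS (+1) and subtract WAITING_BIAS (-0x00010000), i.e. add
	 * 0x00010001; we then knock one ACTIVE_BIAS back off for the 0 -> 1
	 * transition already made at try_again, giving N*0x00010001 - 1
	 */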
	next = sem->wait_list.next;
	for (; loop>0; loop--) {
		waiter = list_entry(next,struct rwsem_waiter,list);
		next = waiter->list.next;
		tsk = waiter->task;
		mb();
		waiter->flags = 0;
		wake_up_process(tsk);
		free_task_struct(tsk);
	}
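	/* note that the woken waiters were not individually deleted: the splice
	 * below re-points the list head past all of them in one go
	 */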
	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem,"Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count
	 * - check for a transition 1->0 in the active part alone: the waiting part
	 *   keeps the whole count non-zero here, so testing the complete count for
	 *   zero would let a release made whilst we hold the transient bias go
	 *   unnoticed and the wakeup be lost
	 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again;
}

/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
						 struct rwsem_waiter *waiter,
						 signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk,TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list,&sem->wait_list);

	/* note that we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment,sem);

	/* if there are no longer active locks, wake the front queued process(es) up
	 * - it might even be this process, since the waker takes a more active part
	 */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irq(&sem->wait_lock);

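	/* the waker zeroises waiter->flags (after an mb()) to signal that the lock
	 * has been granted; the get_task_struct() above keeps this task_struct
	 * valid for the waker's wake_up_process()/free_task_struct() even if we
	 * get going again and return before it makes those calls
	 */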
	/* wait to be given the lock */
	for (;;) {
		if (!waiter->flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem,"Entering rwsem_down_read_failed");

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem,"Leaving rwsem_down_read_failed");

	return sem;
}

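/* a note on the adjustments passed to rwsem_down_failed_common, assuming the
 * usual bias values: the reader's failed fast path has already added
 * ACTIVE_BIAS, so WAITING_BIAS-ACTIVE_BIAS nets out to one WAITING_BIAS; the
 * writer's fast path added WAITING_BIAS+ACTIVE_BIAS, so -ACTIVE_BIAS likewise
 * leaves exactly one WAITING_BIAS standing for the queued task
 */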
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem,"Entering rwsem_down_write_failed");

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem,"Leaving rwsem_down_write_failed");

	return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of the count if we come here
 */
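/* note the irqsave/irqrestore pairing here, where the down slow paths use
 * plain spin_lock_irq: a task blocking in down must be in process context
 * with interrupts enabled, but an up may plausibly be issued with interrupts
 * already disabled, so the IRQ state is preserved rather than assumed
 */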
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem,"Entering rwsem_wake");

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem,"Leaving rwsem_wake");

	return sem;
}

EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif