/*
 *  linux/net/sunrpc/sched.c
 *
 *  Scheduling for synchronous and asynchronous RPC requests.
 *
 *  Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 *  TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
#include <linux/module.h>

#define __KERNEL_SYSCALLS__
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
static int			rpc_task_id;
#endif
/*
 * We give RPC the same get_free_pages priority as NFS.
 */
#define GFP_RPC			GFP_NOFS
static void			__rpc_default_timer(struct rpc_task *task);
static void			rpciod_killall(void);
/*
 * When an asynchronous RPC task is activated within a bottom half
 * handler, or while executing another RPC task, it is put on
 * schedq, and rpciod is woken up.
 */
static RPC_WAITQ(schedq, "schedq");
/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion.
 */
static RPC_WAITQ(childq, "childq");
/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");
/*
 * All RPC tasks are linked into this list.
 */
static LIST_HEAD(all_tasks);
/*
 * rpciod-related stuff
 */
static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
static DECLARE_WAIT_QUEUE_HEAD(rpciod_killer);
static DECLARE_MUTEX(rpciod_sema);
static unsigned int		rpciod_users;
static pid_t			rpciod_pid;
static int			rpc_inhibit;
/*
 * Spinlock for wait queues. Access to the latter also has to be
 * interrupt-safe in order to allow timers to wake up sleeping tasks.
 */
static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
/*
 * Spinlock for other critical sections of code.
 */
static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
/*
 * This is the last-ditch buffer for NFS swap requests.
 */
static u32			swap_buffer[PAGE_SIZE >> 2];
static long			swap_buffer_used;
/*
 * Make allocation of the swap_buffer SMP-safe.
 */
static __inline__ int rpc_lock_swapbuf(void)
{
	return !test_and_set_bit(1, &swap_buffer_used);
}
static __inline__ void rpc_unlock_swapbuf(void)
{
	clear_bit(1, &swap_buffer_used);
}
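
/*
 * Illustrative sketch, not part of the original file: the intended
 * pattern for the emergency buffer is try-lock, use, unlock, falling
 * back to an ordinary allocation when the buffer is already claimed.
 * The helper name and GFP flag below are hypothetical.
 */
#if 0
static void *example_grab_emergency_buffer(unsigned int size)
{
	if (size <= sizeof(swap_buffer) && rpc_lock_swapbuf())
		return swap_buffer;	/* caller must rpc_unlock_swapbuf() */
	return kmalloc(size, GFP_ATOMIC);	/* ordinary allocation instead */
}
#endif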
/*
 * Disable the timer for a given RPC task. Should be called with
 * rpc_queue_lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}
/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void
rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	spin_lock_bh(&rpc_queue_lock);
	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	spin_unlock_bh(&rpc_queue_lock);
	if (callback) {
		dprintk("RPC: %4d running timer\n", task->tk_pid);
		callback(task);
	}
}
/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}
/*
 * Set up a timer for an already sleeping task.
 */
void rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	spin_lock_bh(&rpc_queue_lock);
	if (!RPC_IS_RUNNING(task))
		__rpc_add_timer(task, timer);
	spin_unlock_bh(&rpc_queue_lock);
}
/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding rpc_queue_lock.
 */
static inline void
rpc_delete_timer(struct rpc_task *task)
{
	if (timer_pending(&task->tk_timer)) {
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
		del_timer_sync(&task->tk_timer);
	}
}
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static inline int
__rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait == queue)
		return 0;

	if (task->tk_rpcwait) {
		printk(KERN_WARNING "RPC: doubly enqueued task!\n");
		return -EWOULDBLOCK;
	}
	if (RPC_IS_SWAPPER(task))
		list_add(&task->tk_list, &queue->tasks);
	else
		list_add_tail(&task->tk_list, &queue->tasks);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));

	return 0;
}
int
rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
{
	int		result;

	spin_lock_bh(&rpc_queue_lock);
	result = __rpc_add_wait_queue(q, task);
	spin_unlock_bh(&rpc_queue_lock);
	return result;
}
/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static inline void
__rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue = task->tk_rpcwait;

	if (!queue)
		return;

	list_del(&task->tk_list);
	task->tk_rpcwait = NULL;

	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
void
rpc_remove_wait_queue(struct rpc_task *task)
{
	if (!task->tk_rpcwait)
		return;
	spin_lock_bh(&rpc_queue_lock);
	__rpc_remove_wait_queue(task);
	spin_unlock_bh(&rpc_queue_lock);
}
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout_fn) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	rpc_set_running(task);
	if (RPC_IS_ASYNC(task)) {
		if (RPC_IS_SLEEPING(task)) {
			int status;

			status = __rpc_add_wait_queue(&schedq, task);
			if (status < 0) {
				printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
				task->tk_status = status;
				return;
			}
			rpc_clear_sleeping(task);
			if (waitqueue_active(&rpciod_idle))
				wake_up(&rpciod_idle);
		}
	} else {
		rpc_clear_sleeping(task);
		if (waitqueue_active(&task->tk_wait))
			wake_up(&task->tk_wait);
	}
}
/*
 * Place a newly initialized task on the schedq.
 */
static void
rpc_schedule_run(struct rpc_task *task)
{
	/* Don't run a child twice! */
	if (RPC_IS_ACTIVATED(task))
		return;
	task->tk_active = 1;
	rpc_set_sleeping(task);
	rpc_make_runnable(task);
}
/*
 * For other people who may need to wake the I/O daemon
 * but should (for now) know nothing about its innards
 */
void rpciod_wake_up(void)
{
	if (rpciod_pid == 0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	if (waitqueue_active(&rpciod_idle))
		wake_up(&rpciod_idle);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	int status;

	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	/* Mark the task as being activated if so needed */
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_active = 1;
		rpc_set_sleeping(task);
	}

	status = __rpc_add_wait_queue(q, task);
	if (status) {
		printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
		task->tk_status = status;
	} else {
		rpc_clear_running(task);
		if (task->tk_callback) {
			dprintk(KERN_ERR "RPC: %4d overwrites an active callback\n", task->tk_pid);
			BUG();
		}
		task->tk_callback = action;
		__rpc_add_timer(task, timer);
	}
}
void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&rpc_queue_lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&rpc_queue_lock);
}
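
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * (e.g. transport code, inside the task's tk_action step) queues the
 * task with a back-stop timer before returning to the scheduler. The
 * queue, action and timeout value here are hypothetical; a NULL timer
 * means __rpc_default_timer() will fire on expiry.
 */
#if 0
static void example_wait_for_reply(struct rpc_task *task,
				   struct rpc_wait_queue *pending)
{
	task->tk_timeout = 5 * HZ;		/* back-stop timer */
	rpc_sleep_on(pending, task, NULL, NULL);
	/* an async task resumes in tk_action; a sync one wakes in __rpc_execute */
}
#endif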
/**
 * __rpc_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold rpc_queue_lock
 */
static void
__rpc_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up_task (now %ld inh %d)\n",
					task->tk_pid, jiffies, rpc_inhibit);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to wake up non-existing task!\n");
		rpc_debug = ~0;
		return;
	}
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}
	if (RPC_IS_RUNNING(task))
		return;

	__rpc_disable_timer(task);
	if (task->tk_rpcwait != &schedq)
		__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}
/*
 * Wake up the specified task
 */
void
rpc_wake_up_task(struct rpc_task *task)
{
	if (RPC_IS_RUNNING(task))
		return;
	spin_lock_bh(&rpc_queue_lock);
	__rpc_wake_up_task(task);
	spin_unlock_bh(&rpc_queue_lock);
}
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	spin_lock_bh(&rpc_queue_lock);
	task_for_first(task, &queue->tasks)
		__rpc_wake_up_task(task);
	spin_unlock_bh(&rpc_queue_lock);

	return task;
}
/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs rpc_queue_lock
 */
void
rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	spin_lock_bh(&rpc_queue_lock);
	while (!list_empty(&queue->tasks))
		task_for_first(task, &queue->tasks)
			__rpc_wake_up_task(task);
	spin_unlock_bh(&rpc_queue_lock);
}
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs rpc_queue_lock
 */
void
rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	spin_lock_bh(&rpc_queue_lock);
	while (!list_empty(&queue->tasks)) {
		task_for_first(task, &queue->tasks) {
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
	}
	spin_unlock_bh(&rpc_queue_lock);
}
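
/*
 * Illustrative sketch, not part of the original file: on a permanent
 * transport failure a caller can fail every queued task in one pass.
 * The queue pointer and error value here are hypothetical.
 */
#if 0
static void example_fail_pending(struct rpc_wait_queue *pending)
{
	/* every sleeper gets tk_status = -EIO and is made runnable */
	rpc_wake_up_status(pending, -EIO);
}
#endif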
/*
 * Run a task at a later time
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	rpc_wake_up_task(task);
}
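
/*
 * Illustrative sketch, not part of the original file: retrying an
 * operation after a short pause. The continuation function and delay
 * are hypothetical; tk_action is the step that runs once __rpc_atrun()
 * wakes the task.
 */
#if 0
static void example_retry_later(struct rpc_task *task, rpc_action retry_fn)
{
	task->tk_action = retry_fn;	/* step to run after the pause */
	rpc_delay(task, HZ >> 2);	/* sleep on delay_queue ~250 ms */
}
#endif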
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	int		status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

 restarted:
	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 *  - Dave
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}
		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task)) {
			/*
			 * Garbage collection of pending timers...
			 */
			rpc_delete_timer(task);
			if (!task->tk_action)
				break;
			task->tk_action(task);
		}
		/*
		 * Check whether task is sleeping.
		 */
		spin_lock_bh(&rpc_queue_lock);
		if (!RPC_IS_RUNNING(task)) {
			rpc_set_sleeping(task);
			if (RPC_IS_ASYNC(task)) {
				spin_unlock_bh(&rpc_queue_lock);
				return 0;
			}
		}
		spin_unlock_bh(&rpc_queue_lock);
		while (RPC_IS_SLEEPING(task)) {
			/* sync task: sleep here */
			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
			if (current->pid == rpciod_pid)
				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");

			__wait_event(task->tk_wait, !RPC_IS_SLEEPING(task));
			dprintk("RPC: %4d sync task resuming\n", task->tk_pid);

			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			if (task->tk_client->cl_intr && signalled()) {
				dprintk("RPC: %4d got signal\n", task->tk_pid);
				task->tk_flags |= RPC_TASK_KILLED;
				rpc_exit(task, -ERESTARTSYS);
				rpc_wake_up_task(task);
			}
		}
	}
	if (task->tk_exit) {
		task->tk_exit(task);
		/* If tk_action is non-null, the user wants us to restart */
		if (task->tk_action) {
			if (!RPC_ASSASSINATED(task)) {
				/* Release RPC slot and buffer memory */
				if (task->tk_rqstp)
					xprt_release(task);
				if (task->tk_buffer) {
					rpc_free(task->tk_buffer);
					task->tk_buffer = NULL;
				}
				goto restarted;
			}
			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	status = task->tk_status;

	/* Release all resources associated with the task */
	rpc_release_task(task);

	return status;
}
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
	int status = -EIO;

	if (rpc_inhibit) {
		printk(KERN_INFO "RPC: execution inhibited!\n");
		goto out_release;
	}

	status = -EWOULDBLOCK;
	if (task->tk_active) {
		printk(KERN_ERR "RPC: active task was run twice!\n");
		goto out_err;
	}

	task->tk_active = 1;
	rpc_set_running(task);
	return __rpc_execute(task);
 out_release:
	rpc_release_task(task);
 out_err:
	return status;
}
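
/*
 * Illustrative sketch, not part of the original file: because
 * rpc_execute() guarantees the task is released on return (see the
 * NOTE above), callers must copy anything they need out of the task
 * beforehand. The function name is hypothetical.
 */
#if 0
static int example_sync_call(struct rpc_task *task)
{
	int status = rpc_execute(task);	/* task may already be freed here */

	/* do NOT dereference task past this point */
	return status;
}
#endif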
/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task	*task;
	int		count = 0;

	dprintk("RPC: rpc_schedule enter\n");
	while (1) {
		spin_lock_bh(&rpc_queue_lock);

		/* task_for_first() expands to an if, so the else is legal */
		task_for_first(task, &schedq.tasks) {
			__rpc_remove_wait_queue(task);
			spin_unlock_bh(&rpc_queue_lock);

			__rpc_execute(task);
		} else {
			spin_unlock_bh(&rpc_queue_lock);
			break;
		}

		if (++count >= 200 || current->need_resched) {
			count = 0;
			schedule();
		}
	}
	dprintk("RPC: rpc_schedule leave\n");
}
/*
 * Allocate memory for RPC purposes.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 *	sync user requests:	GFP_KERNEL
 *	async requests:		GFP_RPC		(== GFP_NOFS)
 *	swap requests:		GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		if ((flags & RPC_TASK_SWAPPER) && size <= sizeof(swap_buffer)
		    && rpc_lock_swapbuf()) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		/* sync caller: give the system a chance to reclaim memory */
		yield();
	} while (!signalled());

	return NULL;
}
void
rpc_free(void *buffer)
{
	if (buffer != swap_buffer) {
		kfree(buffer);
		return;
	}
	rpc_unlock_swapbuf();
}
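
/*
 * Illustrative sketch, not part of the original file: rpc_allocate()
 * and rpc_free() must be paired, since rpc_free() is what releases
 * the last-ditch swap buffer again. The size and flags below are
 * hypothetical.
 */
#if 0
static int example_with_scratch_buffer(void)
{
	void *buf = rpc_allocate(RPC_TASK_ASYNC, 1024);

	if (!buf)
		return -ENOMEM;	/* async callers never block here */
	/* ... use buf ... */
	rpc_free(buf);		/* kfree() or unlock of swap_buffer */
	return 0;
}
#endif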
/*
 * Creation and deletion of RPC task structures
 */
void
rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_exit   = callback;
	init_waitqueue_head(&task->tk_wait);
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	list_add(&task->tk_task, &all_tasks);
	spin_unlock(&rpc_sched_lock);

	if (clnt)
		atomic_inc(&clnt->cl_users);

#ifdef RPC_DEBUG
	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;
#endif
	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}
static void
rpc_default_free_task(struct rpc_task *task)
{
	dprintk("RPC: %4d freeing task\n", task->tk_pid);
	rpc_free(task);
}
/*
 * Create a new task for the specified client. We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	/* Replace tk_release */
	task->tk_release = rpc_default_free_task;

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}
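
/*
 * Illustrative sketch, not part of the original file: the usual life
 * cycle is rpc_new_task() followed by rpc_execute(), which releases
 * the task on exit. The exit callback and function name below are
 * hypothetical.
 */
#if 0
static int example_async_call(struct rpc_clnt *clnt, rpc_action done_fn,
			      rpc_action first_step)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, done_fn, RPC_TASK_ASYNC);
	if (!task)
		return -ENOMEM;		/* rpc_new_task already dropped clnt */
	task->tk_action = first_step;	/* first FSM step to run */
	return rpc_execute(task);
}
#endif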
void
rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %4d release task\n", task->tk_pid);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to release a non-existing task!\n");
		rpc_debug = ~0;
		return;
	}
#endif

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	list_del(&task->tk_task);
	spin_unlock(&rpc_sched_lock);

	/* Protect the execution below. */
	spin_lock_bh(&rpc_queue_lock);

	/* Disable timer to prevent zombie wakeup */
	__rpc_disable_timer(task);

	/* Remove from any wait queue we're still on */
	__rpc_remove_wait_queue(task);

	task->tk_active = 0;

	spin_unlock_bh(&rpc_queue_lock);

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	if (task->tk_release)
		task->tk_release(task);
}
/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold rpc_queue_lock
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*task, *parent;
	struct list_head *le;

	parent = (struct rpc_task *) child->tk_calldata;
	task_for_each(task, le, &childq.tasks)
		if (task == parent)
			return parent;

	return NULL;
}
static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	spin_lock_bh(&rpc_queue_lock);
	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		__rpc_wake_up_task(parent);
	}
	spin_unlock_bh(&rpc_queue_lock);
}
/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}
void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	spin_lock_bh(&rpc_queue_lock);
	/* N.B. Is it possible for the child to have already finished? */
	__rpc_sleep_on(&childq, task, func, NULL);
	rpc_schedule_run(child);
	spin_unlock_bh(&rpc_queue_lock);
}
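
/*
 * Illustrative sketch, not part of the original file: the pattern used
 * e.g. for portmapper lookups. The parent sleeps on childq until
 * rpc_child_exit() copies the child's status back and wakes it; the
 * continuation function name below is hypothetical.
 */
#if 0
static void example_spawn_child(struct rpc_task *parent,
				struct rpc_clnt *pmap_clnt,
				rpc_action parent_continue)
{
	struct rpc_task *child = rpc_new_child(pmap_clnt, parent);

	if (!child)
		return;		/* parent->tk_status is already -ENOMEM */
	rpc_run_child(parent, child, parent_continue);
}
#endif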
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;
	struct list_head *le;

	dprintk("RPC: killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	alltask_for_each(rovr, le, &all_tasks)
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	spin_unlock(&rpc_sched_lock);
}
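
/*
 * Illustrative sketch, not part of the original file: a client
 * shutdown path kills only its own tasks, while passing NULL (as
 * rpciod_killall() does) kills every task in the system. The function
 * name is hypothetical.
 */
#if 0
static void example_shutdown_client(struct rpc_clnt *clnt)
{
	rpc_killall_tasks(clnt);	/* flag and wake this client's tasks */
}
#endif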
static DECLARE_MUTEX_LOCKED(rpciod_running);

static inline int
rpciod_task_pending(void)
{
	return !list_empty(&schedq.tasks);
}
/*
 * This is the rpciod kernel thread
 */
static int
rpciod(void *ptr)
{
	wait_queue_head_t *assassin = (wait_queue_head_t*) ptr;
	int		rounds = 0;

	MOD_INC_USE_COUNT;
	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	daemonize();

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	strcpy(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}

		if (!rpciod_task_pending()) {
			dprintk("RPC: rpciod back to sleep\n");
			wait_event_interruptible(rpciod_idle, rpciod_task_pending());
			dprintk("RPC: switch to rpciod\n");
			rounds = 0;
		}
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (!list_empty(&all_tasks)) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	MOD_DEC_USE_COUNT;
	return 0;
}
static void
rpciod_killall(void)
{
	unsigned long flags;

	while (!list_empty(&all_tasks)) {
		current->sigpending = 0;
		rpc_killall_tasks(NULL);
		__rpc_schedule();
		if (!list_empty(&all_tasks)) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			yield();
		}
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, &rpciod_killer, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
	return error;
}
void
rpciod_down(void)
{
	unsigned long flags;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	/*
	 * Usually rpciod will exit very quickly, so we
	 * wait briefly before checking the process id.
	 */
	current->sigpending = 0;
	yield();
	/*
	 * Display a message if we're going to wait longer.
	 */
	while (rpciod_pid) {
		dprintk("rpciod_down: waiting for pid %d to exit\n", rpciod_pid);
		if (signalled()) {
			dprintk("rpciod_down: caught signal\n");
			break;
		}
		interruptible_sleep_on(&rpciod_killer);
	}
	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
}
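
/*
 * Illustrative sketch, not part of the original file: users of rpciod
 * bracket its lifetime with rpciod_up()/rpciod_down(), which reference
 * count the daemon; the first up() spawns it, the last down() kills
 * it. The mount/unmount hook names below are hypothetical.
 */
#if 0
static int example_mount(void)
{
	return rpciod_up();	/* starts rpciod on first use */
}

static void example_unmount(void)
{
	rpciod_down();		/* kills rpciod when the last user leaves */
}
#endif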
#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct list_head *le;
	struct rpc_task *t;

	spin_lock(&rpc_sched_lock);
	if (list_empty(&all_tasks)) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	alltask_for_each(t, le, &all_tasks)
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid, t->tk_msg.rpc_proc, t->tk_flags, t->tk_status,
			t->tk_client, t->tk_client->cl_prog,
			t->tk_rqstp, t->tk_timeout,
			t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
			t->tk_action, t->tk_exit);
	spin_unlock(&rpc_sched_lock);
}
#endif