/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
static int			rpc_task_id;
#endif
/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static kmem_cache_t	*rpc_task_slabp;
static kmem_cache_t	*rpc_buffer_slabp;
static mempool_t	*rpc_task_mempool;
static mempool_t	*rpc_buffer_mempool;
static void			__rpc_default_timer(struct rpc_task *task);
static void			rpciod_killall(void);
static void			rpc_free(struct rpc_task *task);
static void			rpc_async_schedule(void *);
/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static RPC_WAITQ(childq, "childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DECLARE_MUTEX(rpciod_sema);
static unsigned int		rpciod_users;
static struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);
/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}
/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	if (callback && RPC_IS_QUEUED(task)) {
		dprintk("RPC: %4d running timer\n", task->tk_pid);
		callback(task);
	}
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	smp_mb__after_clear_bit();
}
/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}
/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
		del_singleshot_timer_sync(&task->tk_timer);
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
	}
}
/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->u.tk_wait.rpc_waitq = queue;
	rpc_set_queued(task);

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}
/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	queue = task->u.tk_wait.rpc_waitq;

	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}
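/*
 * The counters set above drive the wakeup batching in
 * __rpc_wake_up_next_priority(): queue->nr bounds how many tasks sharing
 * a cookie are woken back to back, while queue->count limits how long the
 * current priority level is serviced before a lower one gets a turn.
 */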
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	int do_ret;

	BUG_ON(task->tk_timeout_fn);
	do_ret = rpc_test_and_set_running(task);
	rpc_clear_queued(task);
	if (do_ret)
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
		status = queue_work(task->tk_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up(&task->u.tk_wait.waitq);
}
/*
 * Place a newly initialized task on the workqueue.
 */
static void
rpc_schedule_run(struct rpc_task *task)
{
	/* Don't run a child twice! */
	if (RPC_IS_ACTIVATED(task))
		return;
	task->tk_active = 1;
	rpc_make_runnable(task);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	/* Mark the task as being activated if so needed */
	if (!RPC_IS_ACTIVATED(task))
		task->tk_active = 1;

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(task, timer);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&q->lock);
}
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_disable_timer(task);
	__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task))
			__rpc_do_wake_up_task(task);
		rpc_finish_wakeup(task);
	}
}
/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}
/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task)) {
			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

			spin_lock_bh(&queue->lock);
			__rpc_do_wake_up_task(task);
			spin_unlock_bh(&queue->lock);
		}
		rpc_finish_wakeup(task);
	}
}
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	struct list_head *head;
	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;
	struct rpc_task *task;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
/*
 * Run a task at a later time
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}
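/*
 * Timer callback used by rpc_delay(): once the delay expires, clear the
 * task's status and wake it up again.
 */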
static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	rpc_wake_up_task(task);
}
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

 restarted:
	while (1) {
		/*
		 * Garbage collection of pending timers...
		 */
		rpc_delete_timer(task);

		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 * - patricks
			 */
			save_callback=task->tk_callback;
			task->tk_callback=NULL;
			lock_kernel();
			save_callback(task);
			unlock_kernel();
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (!task->tk_action)
				break;
			lock_kernel();
			task->tk_action(task);
			unlock_kernel();
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return 0;
			if (rpc_test_and_set_running(task))
				return 0;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
		if (RPC_TASK_UNINTERRUPTIBLE(task)) {
			__wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
		} else {
			__wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			if (status == -ERESTARTSYS) {
				dprintk("RPC: %4d got signal\n", task->tk_pid);
				task->tk_flags |= RPC_TASK_KILLED;
				rpc_exit(task, -ERESTARTSYS);
				rpc_wake_up_task(task);
			}
		}
		rpc_set_running(task);
		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
	}

	if (task->tk_exit) {
		lock_kernel();
		task->tk_exit(task);
		unlock_kernel();
		/* If tk_action is non-null, the user wants us to restart */
		if (task->tk_action) {
			if (!RPC_ASSASSINATED(task)) {
				/* Release RPC slot and buffer memory */
				xprt_release(task);
				rpc_free(task);
				goto restarted;
			}
			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	status = task->tk_status;

	/* Release all resources associated with the task */
	rpc_release_task(task);
	return status;
}
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
	BUG_ON(task->tk_active);

	task->tk_active = 1;
	rpc_set_running(task);
	return __rpc_execute(task);
}
static void rpc_async_schedule(void *arg)
{
	__rpc_execute((struct rpc_task *)arg);
}
/*
 * Allocate memory for RPC purposes.
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void *
rpc_malloc(struct rpc_task *task, size_t size)
{
	int gfp;

	if (task->tk_flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else
		gfp = GFP_NOFS;

	if (size > RPC_BUFFER_MAXSIZE) {
		task->tk_buffer = kmalloc(size, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = size;
	} else {
		task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = RPC_BUFFER_MAXSIZE;
	}
	return task->tk_buffer;
}
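/*
 * Free the buffer set up by rpc_malloc(): small buffers go back to the
 * rpc_buffer mempool, larger ones were kmalloc()ed and are kfree()d.
 */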
static void
rpc_free(struct rpc_task *task)
{
	if (task->tk_buffer) {
		if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
			mempool_free(task->tk_buffer, rpc_buffer_mempool);
		else
			kfree(task->tk_buffer);
		task->tk_buffer = NULL;
		task->tk_bufsize = 0;
	}
}
/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_exit   = callback;
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = rpciod_workqueue;
	if (!RPC_IS_ASYNC(task))
		init_waitqueue_head(&task->u.tk_wait.waitq);

	if (clnt) {
		atomic_inc(&clnt->cl_users);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
			task->tk_flags |= RPC_TASK_NOINTR;
	}

#ifdef RPC_DEBUG
	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = rpc_task_id++;
#endif
	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	list_add_tail(&task->tk_task, &all_tasks);
	spin_unlock(&rpc_sched_lock);

	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}
static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void
rpc_default_free_task(struct rpc_task *task)
{
	dprintk("RPC: %4d freeing task\n", task->tk_pid);
	mempool_free(task, rpc_task_mempool);
}
/*
 * Create a new task for the specified client. We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task *task;

	task = rpc_alloc_task();
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	/* Replace tk_release */
	task->tk_release = rpc_default_free_task;

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}
void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %4d release task\n", task->tk_pid);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	list_del(&task->tk_task);
	spin_unlock(&rpc_sched_lock);

	BUG_ON (RPC_IS_QUEUED(task));
	task->tk_active = 0;

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	rpc_free(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	if (task->tk_release)
		task->tk_release(task);
}
/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold childq.lock
 */
static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task *task, *parent;
	struct list_head *le;

	parent = (struct rpc_task *) child->tk_calldata;
	task_for_each(task, le, &childq.tasks[0])
		if (task == parent)
			return parent;

	return NULL;
}
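/*
 * Exit handler for child tasks: copy the child's status to the parent
 * and wake the parent, which is sleeping on childq.
 */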
static void rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task *parent;

	spin_lock_bh(&childq.lock);
	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		__rpc_wake_up_task(parent);
	}
	spin_unlock_bh(&childq.lock);
}
/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}
void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	spin_lock_bh(&childq.lock);
	/* N.B. Is it possible for the child to have already finished? */
	__rpc_sleep_on(&childq, task, func, NULL);
	rpc_schedule_run(child);
	spin_unlock_bh(&childq.lock);
}
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;
	struct list_head *le;

	dprintk("RPC: killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	alltask_for_each(rovr, le, &all_tasks) {
		if (! RPC_IS_ACTIVATED(rovr))
			continue;
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&rpc_sched_lock);
}
static DECLARE_MUTEX_LOCKED(rpciod_running);
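/*
 * Kill all outstanding RPC tasks and wait for them to go away before
 * rpciod itself is torn down.
 */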
static void rpciod_killall(void)
{
	unsigned long flags;

	while (!list_empty(&all_tasks)) {
		clear_thread_flag(TIF_SIGPENDING);
		rpc_killall_tasks(NULL);
		flush_workqueue(rpciod_workqueue);
		if (!list_empty(&all_tasks)) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			yield();
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	struct workqueue_struct *wq;
	int error = 0;

	down(&rpciod_sema);
	dprintk("rpciod_up: users %d\n", rpciod_users);
	rpciod_users++;
	if (rpciod_workqueue)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = -ENOMEM;
	wq = create_workqueue("rpciod");
	if (wq == NULL) {
		printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	rpciod_workqueue = wq;
	error = 0;
out:
	up(&rpciod_sema);
	return error;
}
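/*
 * Release a reference to rpciod and destroy the workqueue once the last
 * user has gone away.
 */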
1024 dprintk("rpciod_down sema %d\n", rpciod_users);
1029 printk(KERN_WARNING "rpciod_down: no users??\n");
1031 if (!rpciod_workqueue) {
1032 dprintk("rpciod_down: Nothing to do!\n");
1037 destroy_workqueue(rpciod_workqueue);
1038 rpciod_workqueue = NULL;
#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct list_head *le;
	struct rpc_task *t;

	spin_lock(&rpc_sched_lock);
	if (list_empty(&all_tasks)) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	alltask_for_each(t, le, &all_tasks) {
		const char *rpc_waitq = "none";

		if (RPC_IS_QUEUED(t))
			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid,
			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
			t->tk_flags, t->tk_status,
			t->tk_client,
			(t->tk_client ? t->tk_client->cl_prog : 0),
			t->tk_rqstp, t->tk_timeout,
			rpc_waitq,
			t->tk_action, t->tk_exit);
	}
	spin_unlock(&rpc_sched_lock);
}
#endif
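/*
 * Tear down the slab caches and mempools set up by rpc_init_mempool().
 */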
void
rpc_destroy_mempool(void)
{
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
		printk(KERN_INFO "rpc_task: not all structures were freed\n");
	if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
		printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}
int
rpc_init_mempool(void)
{
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
					    mempool_alloc_slab,
					    mempool_free_slab,
					    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
					    mempool_alloc_slab,
					    mempool_free_slab,
					    rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}