/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY         RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID       0xf00baa
static int                      rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static kmem_cache_t     *rpc_task_slabp __read_mostly;
static kmem_cache_t     *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;

static void                     __rpc_default_timer(struct rpc_task *task);
static void                     rpciod_killall(void);
static void                     rpc_async_schedule(void *);

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static RPC_WAITQ(childq, "childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DECLARE_MUTEX(rpciod_sema);
static unsigned int             rpciod_users;
static struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held and bottom halves disabled in order to avoid races
 * within rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
        dprintk("RPC: %4d disabling timer\n", task->tk_pid);
        task->tk_timeout_fn = NULL;
        task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
        void (*callback)(struct rpc_task *);

        callback = task->tk_timeout_fn;
        task->tk_timeout_fn = NULL;
        if (callback && RPC_IS_QUEUED(task)) {
                dprintk("RPC: %4d running timer\n", task->tk_pid);
                callback(task);
        }
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        smp_mb__after_clear_bit();
}
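
/*
 * Note on RPC_TASK_HAS_TIMER: __rpc_add_timer() sets the bit just before
 * arming tk_timer, and rpc_run_timer() clears it (with memory barriers)
 * once the timeout function has run. rpc_delete_timer() can then use
 * test_and_clear_bit() and pay for del_singleshot_timer_sync() only when
 * a timer may actually be pending.
 */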

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %4d setting alarm for %lu ms\n",
                        task->tk_pid, task->tk_timeout * 1000 / HZ);

        if (timer)
                task->tk_timeout_fn = timer;
        else
                task->tk_timeout_fn = __rpc_default_timer;
        set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task))
                return;
        if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
                del_singleshot_timer_sync(&task->tk_timer);
                dprintk("RPC: %4d deleting timer\n", task->tk_pid);
        }
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        q = &queue->tasks[task->tk_priority];
        if (unlikely(task->tk_priority > queue->maxpriority))
                q = &queue->tasks[queue->maxpriority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_cookie == task->tk_cookie) {
                        list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}
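
/*
 * Tasks queued above that share a tk_cookie are chained on the
 * u.tk_wait.links list of the first queued task with that cookie, which
 * lets __rpc_wake_up_next_priority() service them as a single batch.
 */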

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        BUG_ON(RPC_IS_QUEUED(task));

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->u.tk_wait.rpc_waitq = queue;
        rpc_set_queued(task);

        dprintk("RPC: %4d added to queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        queue = task->u.tk_wait.rpc_waitq;

        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        dprintk("RPC: %4d removed from queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        queue->priority = priority;
        queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
        queue->cookie = cookie;
        queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_cookie(queue, 0);
}
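
/*
 * The quota set above means a queue serving priority level p handles up
 * to 1 << (p * 2) cookie batches (each of at most RPC_BATCH_COUNT
 * wakeups) before __rpc_wake_up_next_priority() rotates to the next
 * level, which is how higher-priority tasks get a larger share.
 */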

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = maxprio;
        rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
        queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

static int rpc_wait_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static inline void rpc_mark_complete_task(struct rpc_task *task)
{
        rpc_clear_active(task);
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
        if (action == NULL)
                action = rpc_wait_bit_interruptible;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
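
/*
 * Illustrative caller sketch (assumes the rpc_wait_for_completion_task()
 * wrapper in sunrpc/sched.h, which passes a NULL action):
 *
 *        task = rpc_run_task(clnt, RPC_TASK_ASYNC, &ops, data);
 *        if (!IS_ERR(task)) {
 *                if (rpc_wait_for_completion_task(task) < 0)
 *                        handle_signal();        (hypothetical cleanup)
 *                rpc_release_task(task);
 *        }
 */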

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
        BUG_ON(task->tk_timeout_fn);
        rpc_clear_queued(task);
        if (rpc_test_and_set_running(task))
                return;
        /* We might have raced */
        if (RPC_IS_QUEUED(task)) {
                rpc_clear_running(task);
                return;
        }
        if (RPC_IS_ASYNC(task)) {
                int status;

                INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
                        task->tk_status = status;
                        return;
                }
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
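
/*
 * The test_and_set_running/RPC_IS_QUEUED re-check above closes a race
 * with __rpc_execute(): a task that is going back to sleep may be woken
 * again before the executing thread has dropped RPC_TASK_RUNNING, in
 * which case this wakeup must back off and leave the task queued.
 */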

/*
 * Place a newly initialized task on the workqueue.
 */
static inline void
rpc_schedule_run(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_make_runnable(task);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                        rpc_action action, rpc_action timer)
{
        dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
                                rpc_qname(q), jiffies);

        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
                return;
        }

        /* Mark the task as being activated if so needed */
        rpc_set_active(task);

        __rpc_add_wait_queue(q, task);

        BUG_ON(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
{
        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on(q, task, action, timer);
        spin_unlock_bh(&q->lock);
}
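
/*
 * A minimal (illustrative) sleep pattern, mirroring rpc_delay() below:
 * set tk_timeout first, then put the task on the queue with an optional
 * timeout handler:
 *
 *        task->tk_timeout = 5 * HZ;
 *        rpc_sleep_on(&some_waitq, task, some_action, some_timeout_fn);
 *
 * (some_waitq, some_action and some_timeout_fn are placeholders.)
 */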

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
        dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        __rpc_disable_timer(task);
        __rpc_remove_wait_queue(task);

        rpc_make_runnable(task);

        dprintk("RPC:      __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task))
                        __rpc_do_wake_up_task(task);
                rpc_finish_wakeup(task);
        }
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
        dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
        task->tk_status = -ETIMEDOUT;
        rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

                        spin_lock_bh(&queue->lock);
                        __rpc_do_wake_up_task(task);
                        spin_unlock_bh(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single cookie.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->cookie == task->tk_cookie) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                if (--queue->count)
                        goto new_cookie;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
        rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
        __rpc_wake_up_task(task);
        return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
        spin_lock_bh(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
        spin_unlock_bh(&queue->lock);

        return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
                        __rpc_wake_up_task(task);
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
                        task->tk_status = status;
                        __rpc_wake_up_task(task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/*
 * Run a task at a later time
 */
static void     __rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
        task->tk_status = 0;
        rpc_wake_up_task(task);
}
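
/*
 * Callers typically invoke rpc_delay() from within a tk_action routine
 * to back off before a retry; a minimal (illustrative) pattern, where
 * some_retry_state is a placeholder for the next FSM step:
 *
 *        task->tk_action = some_retry_state;
 *        rpc_delay(task, HZ >> 4);
 *        return;
 */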

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                }
        }
}
EXPORT_SYMBOL(rpc_exit_task);
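
/*
 * Note that rpc_call_done may restart the call by setting a new
 * tk_action; in that case rpc_exit_task() releases the transport slot
 * and buffer above so that the retried call starts from a clean state.
 */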

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
        int             status = 0;

        dprintk("RPC: %4d rpc_execute flgs %x\n",
                                task->tk_pid, task->tk_flags);

        BUG_ON(RPC_IS_QUEUED(task));

        for (;;) {
                /*
                 * Garbage collection of pending timers...
                 */
                rpc_delete_timer(task);

                /*
                 * Execute any pending callback.
                 */
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it,
                         * call it.
                         * The save is needed to stop us from resetting
                         * another callback that was set from within the
                         * callback handler.
                         * - Dave
                         */
                        save_callback = task->tk_callback;
                        task->tk_callback = NULL;
                        lock_kernel();
                        save_callback(task);
                        unlock_kernel();
                }

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
                        lock_kernel();
                        task->tk_action(task);
                        unlock_kernel();
                }

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                rpc_clear_running(task);
                if (RPC_IS_ASYNC(task)) {
                        /* Careful! we may have raced... */
                        if (RPC_IS_QUEUED(task))
                                return 0;
                        if (rpc_test_and_set_running(task))
                                return 0;
                        continue;
                }

                /* sync task: sleep here */
                dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
                /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %4d got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                        rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
        /* Wake up anyone who is waiting for task completion */
        rpc_mark_complete_task(task);
        /* Release all resources associated with the task */
        rpc_release_task(task);
        return status;
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular note that tk_ops->rpc_release() will
 *       have been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_set_running(task);
        return __rpc_execute(task);
}

static void rpc_async_schedule(void *arg)
{
        __rpc_execute((struct rpc_task *)arg);
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void * rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_rqst *req = task->tk_rqstp;
        gfp_t   gfp;

        if (task->tk_flags & RPC_TASK_SWAPPER)
                gfp = GFP_ATOMIC;
        else
                gfp = GFP_NOFS;

        if (size > RPC_BUFFER_MAXSIZE) {
                req->rq_buffer = kmalloc(size, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = size;
        } else {
                req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = RPC_BUFFER_MAXSIZE;
        }
        return req->rq_buffer;
}
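
/*
 * Because requests of up to RPC_BUFFER_MAXSIZE bytes are served from
 * rpc_buffer_mempool, a few reads and writes can still make progress
 * when memory is tight; only oversized requests fall back to a plain
 * kmalloc() that may fail.
 */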

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @task: RPC task with a buffer to be freed
 *
 */
void rpc_free(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_buffer) {
                if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
                        mempool_free(req->rq_buffer, rpc_buffer_mempool);
                else
                        kfree(req->rq_buffer);
                req->rq_buffer = NULL;
                req->rq_bufsize = 0;
        }
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        memset(task, 0, sizeof(*task));
        init_timer(&task->tk_timer);
        task->tk_timer.data     = (unsigned long) task;
        task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
        atomic_set(&task->tk_count, 1);
        task->tk_client = clnt;
        task->tk_flags  = flags;
        task->tk_ops = tk_ops;
        if (tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
        task->tk_calldata = calldata;

        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;

        task->tk_priority = RPC_PRIORITY_NORMAL;
        task->tk_cookie = (unsigned long)current;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;

        if (clnt) {
                atomic_inc(&clnt->cl_users);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (!clnt->cl_intr)
                        task->tk_flags |= RPC_TASK_NOINTR;
        }

#ifdef RPC_DEBUG
        task->tk_magic = RPC_TASK_MAGIC_ID;
        task->tk_pid = rpc_task_id++;
#endif
        /* Add to global list of all tasks */
        spin_lock(&rpc_sched_lock);
        list_add_tail(&task->tk_task, &all_tasks);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(task->tk_ops == NULL);

        dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
                                current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rpc_task *task)
{
        dprintk("RPC: %4d freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        struct rpc_task *task;

        task = rpc_alloc_task();
        if (!task)
                goto cleanup;

        rpc_init_task(task, clnt, flags, tk_ops, calldata);

        dprintk("RPC: %4d allocated task\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_DYNAMIC;
out:
        return task;

cleanup:
        /* Check whether to release the client */
        if (clnt) {
                printk(KERN_WARNING "rpc_new_task: failed, users=%d, oneshot=%d\n",
                        atomic_read(&clnt->cl_users), clnt->cl_oneshot);
                atomic_inc(&clnt->cl_users); /* pretend we were used ... */
                rpc_release_client(clnt);
        }
        goto out;
}

void rpc_release_task(struct rpc_task *task)
{
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        if (!atomic_dec_and_test(&task->tk_count))
                return;
        dprintk("RPC: %4d release task\n", task->tk_pid);

        /* Remove from global task list */
        spin_lock(&rpc_sched_lock);
        list_del(&task->tk_task);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(RPC_IS_QUEUED(task));

        /* Synchronously delete any running timer */
        rpc_delete_timer(task);

        /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                rpcauth_unbindcred(task);
        if (task->tk_client) {
                rpc_release_client(task->tk_client);
                task->tk_client = NULL;
        }

#ifdef RPC_DEBUG
        task->tk_magic = 0;
#endif
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                rpc_free_task(task);
        if (tk_ops->rpc_release)
                tk_ops->rpc_release(calldata);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                        const struct rpc_call_ops *ops,
                                        void *data)
{
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL)
                return ERR_PTR(-ENOMEM);
        atomic_inc(&task->tk_count);
        rpc_execute(task);
        return task;
}
EXPORT_SYMBOL(rpc_run_task);
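
/*
 * rpc_run_task() takes an extra reference on the task (tk_count), so
 * the caller owns the returned pointer and must drop it with
 * rpc_release_task() when done, e.g. (illustrative):
 *
 *        task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, calldata);
 *        if (IS_ERR(task))
 *                return PTR_ERR(task);
 *        rpc_release_task(task);
 */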

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 * @parent: parent task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold childq.lock
 */
static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
{
        struct rpc_task *task;
        struct list_head *le;

        task_for_each(task, le, &childq.tasks[0])
                if (task == parent)
                        return parent;

        return NULL;
}

static void rpc_child_exit(struct rpc_task *child, void *calldata)
{
        struct rpc_task *parent;

        spin_lock_bh(&childq.lock);
        if ((parent = rpc_find_parent(child, calldata)) != NULL) {
                parent->tk_status = child->tk_status;
                __rpc_wake_up_task(parent);
        }
        spin_unlock_bh(&childq.lock);
}

static const struct rpc_call_ops rpc_child_ops = {
        .rpc_call_done = rpc_child_exit,
};

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
        struct rpc_task *task;

        task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
        if (!task)
                goto fail;
        return task;

fail:
        parent->tk_status = -ENOMEM;
        return NULL;
}

void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
        spin_lock_bh(&childq.lock);
        /* N.B. Is it possible for the child to have already finished? */
        __rpc_sleep_on(&childq, task, func, NULL);
        rpc_schedule_run(child);
        spin_unlock_bh(&childq.lock);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task *rovr;
        struct list_head *le;

        dprintk("RPC:      killing all tasks for client %p\n", clnt);

        /*
         * Spin lock all_tasks to prevent changes...
         */
        spin_lock(&rpc_sched_lock);
        alltask_for_each(rovr, le, &all_tasks) {
                if (!RPC_IS_ACTIVATED(rovr))
                        continue;
                if (!clnt || rovr->tk_client == clnt) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
        unsigned long flags;

        while (!list_empty(&all_tasks)) {
                clear_thread_flag(TIF_SIGPENDING);
                rpc_killall_tasks(NULL);
                flush_workqueue(rpciod_workqueue);
                if (!list_empty(&all_tasks)) {
                        dprintk("rpciod_killall: waiting for tasks to exit\n");
                        yield();
                }
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
        struct workqueue_struct *wq;
        int error = 0;

        down(&rpciod_sema);
        dprintk("rpciod_up: users %d\n", rpciod_users);
        rpciod_users++;
        if (rpciod_workqueue)
                goto out;
        /*
         * If there's no workqueue yet, we should be the first user.
         */
        if (rpciod_users > 1)
                printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
        /*
         * Create the rpciod workqueue.
         */
        error = -ENOMEM;
        wq = create_workqueue("rpciod");
        if (wq == NULL) {
                printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
                rpciod_users--;
                goto out;
        }
        rpciod_workqueue = wq;
        error = 0;
out:
        up(&rpciod_sema);
        return error;
}

void
rpciod_down(void)
{
        down(&rpciod_sema);
        dprintk("rpciod_down: users %d\n", rpciod_users);
        if (rpciod_users) {
                if (--rpciod_users)
                        goto out;
        } else
                printk(KERN_WARNING "rpciod_down: no users??\n");

        if (!rpciod_workqueue) {
                dprintk("rpciod_down: Nothing to do!\n");
                goto out;
        }
        rpciod_killall();

        destroy_workqueue(rpciod_workqueue);
        rpciod_workqueue = NULL;
 out:
        up(&rpciod_sema);
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
        struct list_head *le;
        struct rpc_task *t;

        spin_lock(&rpc_sched_lock);
        if (list_empty(&all_tasks)) {
                spin_unlock(&rpc_sched_lock);
                return;
        }
        printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
                "-rpcwait -action- ---ops--\n");
        alltask_for_each(t, le, &all_tasks) {
                const char *rpc_waitq = "none";

                if (RPC_IS_QUEUED(t))
                        rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

                printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
                        t->tk_pid,
                        (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
                        t->tk_flags, t->tk_status,
                        t->tk_client,
                        (t->tk_client ? t->tk_client->cl_prog : 0),
                        t->tk_rqstp, t->tk_timeout,
                        rpc_waitq,
                        t->tk_action, t->tk_ops);
        }
        spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
        if (rpc_buffer_mempool)
                mempool_destroy(rpc_buffer_mempool);
        if (rpc_task_mempool)
                mempool_destroy(rpc_task_mempool);
        if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
                printk(KERN_INFO "rpc_task: not all structures were freed\n");
        if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
                printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}

int
rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                             sizeof(struct rpc_task),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
                                            mempool_alloc_slab,
                                            mempool_free_slab,
                                            rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
                                            mempool_alloc_slab,
                                            mempool_free_slab,
                                            rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}