xref: /openbmc/linux/net/sunrpc/sched.c (revision 22246614)
1 /*
2  * linux/net/sunrpc/sched.c
3  *
4  * Scheduling for synchronous and asynchronous RPC requests.
5  *
6  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7  *
8  * TCP NFS related read + write fixes
9  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10  */
11 
12 #include <linux/module.h>
13 
14 #include <linux/sched.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/mempool.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/spinlock.h>
21 #include <linux/mutex.h>
22 
23 #include <linux/sunrpc/clnt.h>
24 
25 #ifdef RPC_DEBUG
26 #define RPCDBG_FACILITY		RPCDBG_SCHED
27 #define RPC_TASK_MAGIC_ID	0xf00baa
28 #endif
29 
30 /*
31  * RPC slabs and memory pools
32  */
33 #define RPC_BUFFER_MAXSIZE	(2048)
34 #define RPC_BUFFER_POOLSIZE	(8)
35 #define RPC_TASK_POOLSIZE	(8)
36 static struct kmem_cache	*rpc_task_slabp __read_mostly;
37 static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
38 static mempool_t	*rpc_task_mempool __read_mostly;
39 static mempool_t	*rpc_buffer_mempool __read_mostly;
40 
41 static void			rpc_async_schedule(struct work_struct *);
42 static void			 rpc_release_task(struct rpc_task *task);
43 static void __rpc_queue_timer_fn(unsigned long ptr);
44 
45 /*
46  * RPC tasks sit here while waiting for conditions to improve.
47  */
48 static struct rpc_wait_queue delay_queue;
49 
50 /*
51  * The rpciod workqueue, used to execute asynchronous RPC tasks.
52  */
53 struct workqueue_struct *rpciod_workqueue;
54 
55 /*
56  * Disable the timer for a given RPC task. Must be called with the
57  * queue->lock held and bottom halves disabled in order to avoid races
58  * with rpc_run_timer().
59  */
60 static void
61 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
62 {
63 	if (task->tk_timeout == 0)
64 		return;
65 	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
66 	task->tk_timeout = 0;
67 	list_del(&task->u.tk_wait.timer_list);
68 	if (list_empty(&queue->timer_list.list))
69 		del_timer(&queue->timer_list.timer);
70 }
71 
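/*
 * (Re)arm the queue's timer so that it fires at @expires.
 */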
72 static void
73 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
74 {
75 	queue->timer_list.expires = expires;
76 	mod_timer(&queue->timer_list.timer, expires);
77 }
78 
79 /*
80  * Set up a timer for the given task.
81  */
82 static void
83 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
84 {
85 	if (!task->tk_timeout)
86 		return;
87 
88 	dprintk("RPC: %5u setting alarm for %lu ms\n",
89 			task->tk_pid, task->tk_timeout * 1000 / HZ);
90 
91 	task->u.tk_wait.expires = jiffies + task->tk_timeout;
92 	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
93 		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
94 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
95 }
96 
97 /*
98  * Add new request to a priority queue.
99  */
100 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
101 {
102 	struct list_head *q;
103 	struct rpc_task *t;
104 
105 	INIT_LIST_HEAD(&task->u.tk_wait.links);
106 	q = &queue->tasks[task->tk_priority];
107 	if (unlikely(task->tk_priority > queue->maxpriority))
108 		q = &queue->tasks[queue->maxpriority];
109 	list_for_each_entry(t, q, u.tk_wait.list) {
110 		if (t->tk_owner == task->tk_owner) {
111 			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
112 			return;
113 		}
114 	}
115 	list_add_tail(&task->u.tk_wait.list, q);
116 }
117 
118 /*
119  * Add new request to wait queue.
120  *
121  * Swapper tasks always get inserted at the head of the queue.
122  * This should avoid many nasty memory deadlocks and hopefully
123  * improve overall performance.
124  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
125  */
126 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
127 {
128 	BUG_ON(RPC_IS_QUEUED(task));
129 
130 	if (RPC_IS_PRIORITY(queue))
131 		__rpc_add_wait_queue_priority(queue, task);
132 	else if (RPC_IS_SWAPPER(task))
133 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
134 	else
135 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
136 	task->tk_waitqueue = queue;
137 	queue->qlen++;
138 	rpc_set_queued(task);
139 
140 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
141 			task->tk_pid, queue, rpc_qname(queue));
142 }
143 
144 /*
145  * Remove request from a priority queue.
146  */
147 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
148 {
149 	struct rpc_task *t;
150 
151 	if (!list_empty(&task->u.tk_wait.links)) {
152 		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
153 		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
154 		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
155 	}
156 }
157 
158 /*
159  * Remove request from queue.
160  * Note: must be called with spin lock held.
161  */
162 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
163 {
164 	__rpc_disable_timer(queue, task);
165 	if (RPC_IS_PRIORITY(queue))
166 		__rpc_remove_wait_queue_priority(task);
167 	list_del(&task->u.tk_wait.list);
168 	queue->qlen--;
169 	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
170 			task->tk_pid, queue, rpc_qname(queue));
171 }
172 
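/*
 * Select the priority level to be serviced next and reset its quota
 * (queue->count).  The quota scales with the priority level, so
 * higher-priority tasks get a larger share of wake-ups before
 * __rpc_wake_up_next_priority() moves on to another level.
 */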
173 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
174 {
175 	queue->priority = priority;
176 	queue->count = 1 << (priority * 2);
177 }
178 
179 static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
180 {
181 	queue->owner = pid;
182 	queue->nr = RPC_BATCH_COUNT;
183 }
184 
185 static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
186 {
187 	rpc_set_waitqueue_priority(queue, queue->maxpriority);
188 	rpc_set_waitqueue_owner(queue, 0);
189 }
190 
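/*
 * Common initialiser: set up @nr_queues priority lists, the queue
 * spinlock, and the timer used to time out tasks sleeping on the queue.
 */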
191 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
192 {
193 	int i;
194 
195 	spin_lock_init(&queue->lock);
196 	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
197 		INIT_LIST_HEAD(&queue->tasks[i]);
198 	queue->maxpriority = nr_queues - 1;
199 	rpc_reset_waitqueue_priority(queue);
200 	queue->qlen = 0;
201 	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
202 	INIT_LIST_HEAD(&queue->timer_list.list);
203 #ifdef RPC_DEBUG
204 	queue->name = qname;
205 #endif
206 }
207 
208 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
209 {
210 	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
211 }
212 
213 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
214 {
215 	__rpc_init_priority_wait_queue(queue, qname, 1);
216 }
217 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
218 
219 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
220 {
221 	del_timer_sync(&queue->timer_list.timer);
222 }
223 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
224 
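/*
 * wait_on_bit() action: sleep until woken, but abort with -ERESTARTSYS
 * if a fatal signal is pending.
 */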
225 static int rpc_wait_bit_killable(void *word)
226 {
227 	if (fatal_signal_pending(current))
228 		return -ERESTARTSYS;
229 	schedule();
230 	return 0;
231 }
232 
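/*
 * On debug builds, stamp each task with a magic value and a small
 * unique id so that it can be identified in dprintk() output.
 */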
233 #ifdef RPC_DEBUG
234 static void rpc_task_set_debuginfo(struct rpc_task *task)
235 {
236 	static atomic_t rpc_pid;
237 
238 	task->tk_magic = RPC_TASK_MAGIC_ID;
239 	task->tk_pid = atomic_inc_return(&rpc_pid);
240 }
241 #else
242 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
243 {
244 }
245 #endif
246 
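/*
 * Mark a task as ACTIVE and, if it has an rpc_clnt, add it to the
 * client's cl_tasks list so that rpc_killall_tasks() can find it.
 */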
247 static void rpc_set_active(struct rpc_task *task)
248 {
249 	struct rpc_clnt *clnt;
250 	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
251 		return;
252 	rpc_task_set_debuginfo(task);
253 	/* Add to global list of all tasks */
254 	clnt = task->tk_client;
255 	if (clnt != NULL) {
256 		spin_lock(&clnt->cl_lock);
257 		list_add_tail(&task->tk_task, &clnt->cl_tasks);
258 		spin_unlock(&clnt->cl_lock);
259 	}
260 }
261 
262 /*
263  * Mark an RPC call as having completed by clearing the 'active' bit
264  */
265 static void rpc_mark_complete_task(struct rpc_task *task)
266 {
267 	smp_mb__before_clear_bit();
268 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
269 	smp_mb__after_clear_bit();
270 	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
271 }
272 
273 /*
274  * Allow callers to wait for completion of an RPC call
275  */
276 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
277 {
278 	if (action == NULL)
279 		action = rpc_wait_bit_killable;
280 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
281 			action, TASK_KILLABLE);
282 }
283 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
284 
285 /*
286  * Make an RPC task runnable.
287  *
288  * Note: If the task is ASYNC, this must be called with
289  * the wait queue's spinlock held to protect the wait queue operation.
290  */
291 static void rpc_make_runnable(struct rpc_task *task)
292 {
293 	rpc_clear_queued(task);
294 	if (rpc_test_and_set_running(task))
295 		return;
296 	/* We might have raced */
297 	if (RPC_IS_QUEUED(task)) {
298 		rpc_clear_running(task);
299 		return;
300 	}
301 	if (RPC_IS_ASYNC(task)) {
302 		int status;
303 
304 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
305 		status = queue_work(rpciod_workqueue, &task->u.tk_work);
306 		if (status < 0) {
307 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
308 			task->tk_status = status;
309 			return;
310 		}
311 	} else
312 		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
313 }
314 
315 /*
316  * Prepare for sleeping on a wait queue.
317  * By always appending tasks to the list we ensure FIFO behavior.
318  * NB: An RPC task will only receive interrupt-driven events as long
319  * as it's on a wait queue.
320  */
321 static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
322 			rpc_action action)
323 {
324 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
325 			task->tk_pid, rpc_qname(q), jiffies);
326 
327 	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
328 		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
329 		return;
330 	}
331 
332 	__rpc_add_wait_queue(q, task);
333 
334 	BUG_ON(task->tk_callback != NULL);
335 	task->tk_callback = action;
336 	__rpc_add_timer(q, task);
337 }
338 
339 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
340 				rpc_action action)
341 {
342 	/* Mark the task as active if it is not already */
343 	rpc_set_active(task);
344 
345 	/*
346 	 * Protect the queue operations.
347 	 */
348 	spin_lock_bh(&q->lock);
349 	__rpc_sleep_on(q, task, action);
350 	spin_unlock_bh(&q->lock);
351 }
352 EXPORT_SYMBOL_GPL(rpc_sleep_on);
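/*
 * Illustrative (not from this file): a caller normally sets
 * task->tk_timeout first and then parks the task on a wait queue, e.g.
 *
 *	task->tk_timeout = some_timeout;	(hypothetical value)
 *	rpc_sleep_on(&some_waitq, task, NULL);
 *
 * The task is later woken by rpc_wake_up_queued_task() or, if the
 * timeout expires first, by __rpc_queue_timer_fn() with -ETIMEDOUT.
 */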
353 
354 /**
355  * __rpc_do_wake_up_task - wake up a single rpc_task
356  * @queue: wait queue
357  * @task: task to be woken up
358  *
359  * Caller must hold queue->lock, and have cleared the task queued flag.
360  */
361 static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
362 {
363 	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
364 			task->tk_pid, jiffies);
365 
366 #ifdef RPC_DEBUG
367 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
368 #endif
369 	/* Has the task been executed yet? If not, we cannot wake it up! */
370 	if (!RPC_IS_ACTIVATED(task)) {
371 		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
372 		return;
373 	}
374 
375 	__rpc_remove_wait_queue(queue, task);
376 
377 	rpc_make_runnable(task);
378 
379 	dprintk("RPC:       __rpc_wake_up_task done\n");
380 }
381 
382 /*
383  * Wake up a queued task while the queue lock is being held
384  */
385 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
386 {
387 	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
388 		__rpc_do_wake_up_task(queue, task);
389 }
390 
391 /*
392  * Wake up a task on a specific queue
393  */
394 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
395 {
396 	spin_lock_bh(&queue->lock);
397 	rpc_wake_up_task_queue_locked(queue, task);
398 	spin_unlock_bh(&queue->lock);
399 }
400 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
401 
402 /*
403  * Wake up the specified task
404  */
405 static void rpc_wake_up_task(struct rpc_task *task)
406 {
407 	rpc_wake_up_queued_task(task->tk_waitqueue, task);
408 }
409 
410 /*
411  * Wake up the next task on a priority queue.
412  */
413 static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
414 {
415 	struct list_head *q;
416 	struct rpc_task *task;
417 
418 	/*
419 	 * Service a batch of tasks from a single owner.
420 	 */
421 	q = &queue->tasks[queue->priority];
422 	if (!list_empty(q)) {
423 		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
424 		if (queue->owner == task->tk_owner) {
425 			if (--queue->nr)
426 				goto out;
427 			list_move_tail(&task->u.tk_wait.list, q);
428 		}
429 		/*
430 		 * Check if we need to switch queues.
431 		 */
432 		if (--queue->count)
433 			goto new_owner;
434 	}
435 
436 	/*
437 	 * Service the next queue.
438 	 */
439 	do {
440 		if (q == &queue->tasks[0])
441 			q = &queue->tasks[queue->maxpriority];
442 		else
443 			q = q - 1;
444 		if (!list_empty(q)) {
445 			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
446 			goto new_queue;
447 		}
448 	} while (q != &queue->tasks[queue->priority]);
449 
450 	rpc_reset_waitqueue_priority(queue);
451 	return NULL;
452 
453 new_queue:
454 	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
455 new_owner:
456 	rpc_set_waitqueue_owner(queue, task->tk_owner);
457 out:
458 	rpc_wake_up_task_queue_locked(queue, task);
459 	return task;
460 }
461 
462 /*
463  * Wake up the next task on the wait queue.
464  */
465 struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
466 {
467 	struct rpc_task	*task = NULL;
468 
469 	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
470 			queue, rpc_qname(queue));
471 	spin_lock_bh(&queue->lock);
472 	if (RPC_IS_PRIORITY(queue))
473 		task = __rpc_wake_up_next_priority(queue);
474 	else {
475 		task_for_first(task, &queue->tasks[0])
476 			rpc_wake_up_task_queue_locked(queue, task);
477 	}
478 	spin_unlock_bh(&queue->lock);
479 
480 	return task;
481 }
482 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
483 
484 /**
485  * rpc_wake_up - wake up all rpc_tasks
486  * @queue: rpc_wait_queue on which the tasks are sleeping
487  *
488  * Grabs queue->lock
489  */
490 void rpc_wake_up(struct rpc_wait_queue *queue)
491 {
492 	struct rpc_task *task, *next;
493 	struct list_head *head;
494 
495 	spin_lock_bh(&queue->lock);
496 	head = &queue->tasks[queue->maxpriority];
497 	for (;;) {
498 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
499 			rpc_wake_up_task_queue_locked(queue, task);
500 		if (head == &queue->tasks[0])
501 			break;
502 		head--;
503 	}
504 	spin_unlock_bh(&queue->lock);
505 }
506 EXPORT_SYMBOL_GPL(rpc_wake_up);
507 
508 /**
509  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
510  * @queue: rpc_wait_queue on which the tasks are sleeping
511  * @status: status value to set
512  *
513  * Grabs queue->lock
514  */
515 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
516 {
517 	struct rpc_task *task, *next;
518 	struct list_head *head;
519 
520 	spin_lock_bh(&queue->lock);
521 	head = &queue->tasks[queue->maxpriority];
522 	for (;;) {
523 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
524 			task->tk_status = status;
525 			rpc_wake_up_task_queue_locked(queue, task);
526 		}
527 		if (head == &queue->tasks[0])
528 			break;
529 		head--;
530 	}
531 	spin_unlock_bh(&queue->lock);
532 }
533 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
534 
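/*
 * Per-queue timer callback: wake every task whose deadline has passed
 * with tk_status = -ETIMEDOUT, then re-arm the timer for the earliest
 * remaining expiry.
 */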
535 static void __rpc_queue_timer_fn(unsigned long ptr)
536 {
537 	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
538 	struct rpc_task *task, *n;
539 	unsigned long expires, now, timeo;
540 
541 	spin_lock(&queue->lock);
542 	expires = now = jiffies;
543 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
544 		timeo = task->u.tk_wait.expires;
545 		if (time_after_eq(now, timeo)) {
546 			dprintk("RPC: %5u timeout\n", task->tk_pid);
547 			task->tk_status = -ETIMEDOUT;
548 			rpc_wake_up_task_queue_locked(queue, task);
549 			continue;
550 		}
551 		if (expires == now || time_after(expires, timeo))
552 			expires = timeo;
553 	}
554 	if (!list_empty(&queue->timer_list.list))
555 		rpc_set_queue_timer(queue, expires);
556 	spin_unlock(&queue->lock);
557 }
558 
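/*
 * Callback used by rpc_delay(): clear the -ETIMEDOUT status left by the
 * queue timer so the task resumes normally once the delay has elapsed.
 */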
559 static void __rpc_atrun(struct rpc_task *task)
560 {
561 	task->tk_status = 0;
562 }
563 
564 /*
565  * Run a task at a later time
566  */
567 void rpc_delay(struct rpc_task *task, unsigned long delay)
568 {
569 	task->tk_timeout = delay;
570 	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
571 }
572 EXPORT_SYMBOL_GPL(rpc_delay);
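/*
 * Illustrative (not from this file): callers normally use this for a
 * short back-off before retrying a state machine step, e.g.
 *
 *	task->tk_action = call_retry_step;	(hypothetical next step)
 *	rpc_delay(task, HZ >> 4);
 *	return;
 */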
573 
574 /*
575  * Helper to call task->tk_ops->rpc_call_prepare
576  */
577 static void rpc_prepare_task(struct rpc_task *task)
578 {
579 	lock_kernel();
580 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
581 	unlock_kernel();
582 }
583 
584 /*
585  * Helper that calls task->tk_ops->rpc_call_done if it exists
586  */
587 void rpc_exit_task(struct rpc_task *task)
588 {
589 	task->tk_action = NULL;
590 	if (task->tk_ops->rpc_call_done != NULL) {
591 		lock_kernel();
592 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
593 		unlock_kernel();
594 		if (task->tk_action != NULL) {
595 			WARN_ON(RPC_ASSASSINATED(task));
596 			/* Always release the RPC slot and buffer memory */
597 			xprt_release(task);
598 		}
599 	}
600 }
601 EXPORT_SYMBOL_GPL(rpc_exit_task);
602 
603 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
604 {
605 	if (ops->rpc_release != NULL) {
606 		lock_kernel();
607 		ops->rpc_release(calldata);
608 		unlock_kernel();
609 	}
610 }
611 
612 /*
613  * This is the RPC `scheduler' (or rather, the finite state machine).
614  */
615 static void __rpc_execute(struct rpc_task *task)
616 {
617 	int		status = 0;
618 
619 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
620 			task->tk_pid, task->tk_flags);
621 
622 	BUG_ON(RPC_IS_QUEUED(task));
623 
624 	for (;;) {
625 
626 		/*
627 		 * Execute any pending callback.
628 		 */
629 		if (RPC_DO_CALLBACK(task)) {
630 			/* Define a callback save pointer */
631 			void (*save_callback)(struct rpc_task *);
632 
633 			/*
634 			 * If a callback exists, save it, clear tk_callback,
635 			 * then invoke the saved pointer.  Clearing first
636 			 * ensures that a new callback set from within the
637 			 * handler is not wiped out afterwards.
638 			 * - Dave
639 			 */
640 			save_callback = task->tk_callback;
641 			task->tk_callback = NULL;
642 			save_callback(task);
643 		}
644 
645 		/*
646 		 * Perform the next FSM step.
647 		 * tk_action may be NULL when the task has been killed
648 		 * by someone else.
649 		 */
650 		if (!RPC_IS_QUEUED(task)) {
651 			if (task->tk_action == NULL)
652 				break;
653 			task->tk_action(task);
654 		}
655 
656 		/*
657 		 * Lockless check for whether task is sleeping or not.
658 		 */
659 		if (!RPC_IS_QUEUED(task))
660 			continue;
661 		rpc_clear_running(task);
662 		if (RPC_IS_ASYNC(task)) {
663 			/* Careful! we may have raced... */
664 			if (RPC_IS_QUEUED(task))
665 				return;
666 			if (rpc_test_and_set_running(task))
667 				return;
668 			continue;
669 		}
670 
671 		/* sync task: sleep here */
672 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
673 		status = out_of_line_wait_on_bit(&task->tk_runstate,
674 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
675 				TASK_KILLABLE);
676 		if (status == -ERESTARTSYS) {
677 			/*
678 			 * When a sync task receives a signal, it exits with
679 			 * -ERESTARTSYS. In order to catch any callbacks that
680 			 * clean up after sleeping on some queue, we don't
681 			 * break the loop here, but go around once more.
682 			 */
683 			dprintk("RPC: %5u got signal\n", task->tk_pid);
684 			task->tk_flags |= RPC_TASK_KILLED;
685 			rpc_exit(task, -ERESTARTSYS);
686 			rpc_wake_up_task(task);
687 		}
688 		rpc_set_running(task);
689 		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
690 	}
691 
692 	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
693 			task->tk_status);
694 	/* Release all resources associated with the task */
695 	rpc_release_task(task);
696 }
697 
698 /*
699  * User-visible entry point to the scheduler.
700  *
701  * This may be called recursively if e.g. an async NFS task updates
702  * the attributes and finds that dirty pages must be flushed.
703  * NOTE: Upon exit of this function the task is guaranteed to be
704  *	 released. In particular note that rpc_release_task() will have
705  *	 been called, so your task memory may have been freed.
706  */
707 void rpc_execute(struct rpc_task *task)
708 {
709 	rpc_set_active(task);
710 	rpc_set_running(task);
711 	__rpc_execute(task);
712 }
713 
714 static void rpc_async_schedule(struct work_struct *work)
715 {
716 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
717 }
718 
719 struct rpc_buffer {
720 	size_t	len;
721 	char	data[];
722 };
723 
724 /**
725  * rpc_malloc - allocate an RPC buffer
726  * @task: RPC task that will use this buffer
727  * @size: requested byte size
728  *
729  * To prevent rpciod from hanging, this allocator never sleeps,
730  * returning NULL if the request cannot be serviced immediately.
731  * The caller can arrange to sleep in a way that is safe for rpciod.
732  *
733  * Most requests are 'small' (under 2KiB) and can be serviced from a
734  * mempool, ensuring that NFS reads and writes can always proceed,
735  * and that there is good locality of reference for these buffers.
736  *
737  * In order to avoid memory starvation triggering more writebacks of
738  * NFS requests, we avoid using GFP_KERNEL.
739  */
740 void *rpc_malloc(struct rpc_task *task, size_t size)
741 {
742 	struct rpc_buffer *buf;
743 	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
744 
745 	size += sizeof(struct rpc_buffer);
746 	if (size <= RPC_BUFFER_MAXSIZE)
747 		buf = mempool_alloc(rpc_buffer_mempool, gfp);
748 	else
749 		buf = kmalloc(size, gfp);
750 
751 	if (!buf)
752 		return NULL;
753 
754 	buf->len = size;
755 	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
756 			task->tk_pid, size, buf);
757 	return &buf->data;
758 }
759 EXPORT_SYMBOL_GPL(rpc_malloc);
760 
761 /**
762  * rpc_free - free buffer allocated via rpc_malloc
763  * @buffer: buffer to free
764  *
765  */
766 void rpc_free(void *buffer)
767 {
768 	size_t size;
769 	struct rpc_buffer *buf;
770 
771 	if (!buffer)
772 		return;
773 
774 	buf = container_of(buffer, struct rpc_buffer, data);
775 	size = buf->len;
776 
777 	dprintk("RPC:       freeing buffer of size %zu at %p\n",
778 			size, buf);
779 
780 	if (size <= RPC_BUFFER_MAXSIZE)
781 		mempool_free(buf, rpc_buffer_mempool);
782 	else
783 		kfree(buf);
784 }
785 EXPORT_SYMBOL_GPL(rpc_free);
786 
787 /*
788  * Creation and deletion of RPC task structures
789  */
790 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
791 {
792 	memset(task, 0, sizeof(*task));
793 	atomic_set(&task->tk_count, 1);
794 	task->tk_flags  = task_setup_data->flags;
795 	task->tk_ops = task_setup_data->callback_ops;
796 	task->tk_calldata = task_setup_data->callback_data;
797 	INIT_LIST_HEAD(&task->tk_task);
798 
799 	/* Initialize retry counters */
800 	task->tk_garb_retry = 2;
801 	task->tk_cred_retry = 2;
802 
803 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
804 	task->tk_owner = current->tgid;
805 
806 	/* Initialize workqueue for async tasks */
807 	task->tk_workqueue = task_setup_data->workqueue;
808 
809 	task->tk_client = task_setup_data->rpc_client;
810 	if (task->tk_client != NULL) {
811 		kref_get(&task->tk_client->cl_kref);
812 		if (task->tk_client->cl_softrtry)
813 			task->tk_flags |= RPC_TASK_SOFT;
814 	}
815 
816 	if (task->tk_ops->rpc_call_prepare != NULL)
817 		task->tk_action = rpc_prepare_task;
818 
819 	if (task_setup_data->rpc_message != NULL) {
820 		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
821 		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
822 		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
823 		/* Bind the user cred */
824 		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
825 		if (task->tk_action == NULL)
826 			rpc_call_start(task);
827 	}
828 
829 	/* starting timestamp */
830 	task->tk_start = jiffies;
831 
832 	dprintk("RPC:       new task initialized, procpid %u\n",
833 				task_pid_nr(current));
834 }
835 
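/*
 * Allocate a task from the task mempool.  GFP_NOFS keeps the allocation
 * from recursing back into filesystem writeback.
 */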
836 static struct rpc_task *
837 rpc_alloc_task(void)
838 {
839 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
840 }
841 
842 /*
843  * Create a new task for the specified client.
844  */
845 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
846 {
847 	struct rpc_task	*task = setup_data->task;
848 	unsigned short flags = 0;
849 
850 	if (task == NULL) {
851 		task = rpc_alloc_task();
852 		if (task == NULL)
853 			goto out;
854 		flags = RPC_TASK_DYNAMIC;
855 	}
856 
857 	rpc_init_task(task, setup_data);
858 
859 	task->tk_flags |= flags;
860 	dprintk("RPC:       allocated task %p\n", task);
861 out:
862 	return task;
863 }
864 
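/*
 * Final teardown of a task: return dynamically allocated tasks to the
 * mempool, then release the caller's calldata.
 */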
865 static void rpc_free_task(struct rpc_task *task)
866 {
867 	const struct rpc_call_ops *tk_ops = task->tk_ops;
868 	void *calldata = task->tk_calldata;
869 
870 	if (task->tk_flags & RPC_TASK_DYNAMIC) {
871 		dprintk("RPC: %5u freeing task\n", task->tk_pid);
872 		mempool_free(task, rpc_task_mempool);
873 	}
874 	rpc_release_calldata(tk_ops, calldata);
875 }
876 
877 static void rpc_async_release(struct work_struct *work)
878 {
879 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
880 }
881 
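/*
 * Drop a reference to a task.  The last reference releases the RPC
 * slot, credentials and client, then frees the task, deferring the
 * free to the task's workqueue when one was configured.
 */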
882 void rpc_put_task(struct rpc_task *task)
883 {
884 	if (!atomic_dec_and_test(&task->tk_count))
885 		return;
886 	/* Release resources */
887 	if (task->tk_rqstp)
888 		xprt_release(task);
889 	if (task->tk_msg.rpc_cred)
890 		rpcauth_unbindcred(task);
891 	if (task->tk_client) {
892 		rpc_release_client(task->tk_client);
893 		task->tk_client = NULL;
894 	}
895 	if (task->tk_workqueue != NULL) {
896 		INIT_WORK(&task->u.tk_work, rpc_async_release);
897 		queue_work(task->tk_workqueue, &task->u.tk_work);
898 	} else
899 		rpc_free_task(task);
900 }
901 EXPORT_SYMBOL_GPL(rpc_put_task);
902 
903 static void rpc_release_task(struct rpc_task *task)
904 {
905 #ifdef RPC_DEBUG
906 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
907 #endif
908 	dprintk("RPC: %5u release task\n", task->tk_pid);
909 
910 	if (!list_empty(&task->tk_task)) {
911 		struct rpc_clnt *clnt = task->tk_client;
912 		/* Remove from client task list */
913 		spin_lock(&clnt->cl_lock);
914 		list_del(&task->tk_task);
915 		spin_unlock(&clnt->cl_lock);
916 	}
917 	BUG_ON(RPC_IS_QUEUED(task));
918 
919 #ifdef RPC_DEBUG
920 	task->tk_magic = 0;
921 #endif
922 	/* Wake up anyone who is waiting for task completion */
923 	rpc_mark_complete_task(task);
924 
925 	rpc_put_task(task);
926 }
927 
928 /*
929  * Kill all tasks for the given client.
930  * XXX: kill their descendants as well?
931  */
932 void rpc_killall_tasks(struct rpc_clnt *clnt)
933 {
934 	struct rpc_task	*rovr;
935 
936 
937 	if (list_empty(&clnt->cl_tasks))
938 		return;
939 	dprintk("RPC:       killing all tasks for client %p\n", clnt);
940 	/*
941 	 * Hold clnt->cl_lock to keep the cl_tasks list stable while we walk it.
942 	 */
943 	spin_lock(&clnt->cl_lock);
944 	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
945 		if (!RPC_IS_ACTIVATED(rovr))
946 			continue;
947 		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
948 			rovr->tk_flags |= RPC_TASK_KILLED;
949 			rpc_exit(rovr, -EIO);
950 			rpc_wake_up_task(rovr);
951 		}
952 	}
953 	spin_unlock(&clnt->cl_lock);
954 }
955 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
956 
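/*
 * rpciod_up()/rpciod_down() take and drop a module reference on behalf
 * of rpciod users; the workqueue itself is created from
 * rpc_init_mempool() via rpciod_start() and torn down in
 * rpc_destroy_mempool().
 */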
957 int rpciod_up(void)
958 {
959 	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
960 }
961 
962 void rpciod_down(void)
963 {
964 	module_put(THIS_MODULE);
965 }
966 
967 /*
968  * Start up the rpciod workqueue.
969  */
970 static int rpciod_start(void)
971 {
972 	struct workqueue_struct *wq;
973 
974 	/*
975 	 * Create the rpciod workqueue.
976 	 */
977 	dprintk("RPC:       creating workqueue rpciod\n");
978 	wq = create_workqueue("rpciod");
979 	rpciod_workqueue = wq;
980 	return rpciod_workqueue != NULL;
981 }
982 
983 static void rpciod_stop(void)
984 {
985 	struct workqueue_struct *wq = NULL;
986 
987 	if (rpciod_workqueue == NULL)
988 		return;
989 	dprintk("RPC:       destroying workqueue rpciod\n");
990 
991 	wq = rpciod_workqueue;
992 	rpciod_workqueue = NULL;
993 	destroy_workqueue(wq);
994 }
995 
996 void
997 rpc_destroy_mempool(void)
998 {
999 	rpciod_stop();
1000 	if (rpc_buffer_mempool)
1001 		mempool_destroy(rpc_buffer_mempool);
1002 	if (rpc_task_mempool)
1003 		mempool_destroy(rpc_task_mempool);
1004 	if (rpc_task_slabp)
1005 		kmem_cache_destroy(rpc_task_slabp);
1006 	if (rpc_buffer_slabp)
1007 		kmem_cache_destroy(rpc_buffer_slabp);
1008 	rpc_destroy_wait_queue(&delay_queue);
1009 }
1010 
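/*
 * Module-init helper: start rpciod and create the slab caches and
 * mempools backing rpc_task and small RPC buffer allocations.
 */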
1011 int
1012 rpc_init_mempool(void)
1013 {
1014 	/*
1015 	 * The following is not strictly a mempool initialisation,
1016 	 * but there is no harm in doing it here
1017 	 */
1018 	rpc_init_wait_queue(&delay_queue, "delayq");
1019 	if (!rpciod_start())
1020 		goto err_nomem;
1021 
1022 	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1023 					     sizeof(struct rpc_task),
1024 					     0, SLAB_HWCACHE_ALIGN,
1025 					     NULL);
1026 	if (!rpc_task_slabp)
1027 		goto err_nomem;
1028 	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1029 					     RPC_BUFFER_MAXSIZE,
1030 					     0, SLAB_HWCACHE_ALIGN,
1031 					     NULL);
1032 	if (!rpc_buffer_slabp)
1033 		goto err_nomem;
1034 	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1035 						    rpc_task_slabp);
1036 	if (!rpc_task_mempool)
1037 		goto err_nomem;
1038 	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1039 						      rpc_buffer_slabp);
1040 	if (!rpc_buffer_mempool)
1041 		goto err_nomem;
1042 	return 0;
1043 err_nomem:
1044 	rpc_destroy_mempool();
1045 	return -ENOMEM;
1046 }
1047