/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held and bh interrupts disabled, in order to avoid
 * races with __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
	struct list_head *q = &queue->tasks[queue->priority];
	struct rpc_task *task;

	if (!list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		if (task->tk_owner == queue->owner)
			list_move_tail(&task->u.tk_wait.list, q);
	}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		/* Fairness: rotate the list when changing priority */
		rpc_rotate_queue_owner(queue);
		queue->priority = priority;
	}
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	if (queue_priority > queue->priority)
		rpc_set_waitqueue_priority(queue, queue_priority);
	q = &queue->tasks[queue_priority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
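
/*
 * Illustrative sketch (not part of the upstream file): a typical user
 * embeds an rpc_wait_queue in its own state, initialises it once, and
 * tears it down with rpc_destroy_wait_queue() so the queue timer cannot
 * fire after the memory is reused. The helper names are hypothetical.
 */
static void example_queue_setup(struct rpc_wait_queue *q)
{
	rpc_init_wait_queue(q, "example-pending");
}

static void example_queue_teardown(struct rpc_wait_queue *q)
{
	/* Fail any sleepers before the queue disappears. */
	rpc_wake_up_status(q, -EIO);
	rpc_destroy_wait_queue(q);
}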

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task->tk_client, task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task->tk_client, task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
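
/*
 * Illustrative sketch (not part of the upstream file): a caller that still
 * holds a reference to an asynchronous task can block until the
 * RPC_TASK_ACTIVE bit clears; passing a NULL action selects the default
 * rpc_wait_bit_killable behaviour. The wrapper below is hypothetical.
 */
static int example_wait_for_task(struct rpc_task *task)
{
	int status;

	status = __rpc_wait_for_completion_task(task, NULL);
	if (status == 0)
		status = task->tk_status;	/* result of the finished call */
	rpc_put_task(task);			/* drop the caller's reference */
	return status;
}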

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task->tk_client, task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
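
/*
 * Illustrative sketch (not part of the upstream file): the usual pattern is
 * for one state-machine step to set a timeout and put the task to sleep on
 * a queue, while another context later wakes it with
 * rpc_wake_up_queued_task(). The queue and helper names are hypothetical.
 */
static struct rpc_wait_queue example_slot_queue;	/* assumed to be set up with rpc_init_wait_queue() */

static void example_wait_for_resource(struct rpc_task *task)
{
	task->tk_timeout = 10 * HZ;	/* assumed cap, for illustration only */
	rpc_sleep_on(&example_slot_queue, task, NULL);
}

static void example_resource_available(struct rpc_task *task)
{
	/* Runs from the context that freed the resource. */
	rpc_wake_up_queued_task(&example_slot_queue, task);
}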

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task->tk_client, task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task_on_wq(wq, queue, task);
	}
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL) {
		if (func(task, data))
			rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
		else
			task = NULL;
	}
	spin_unlock_bh(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);
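
/*
 * Illustrative sketch (not part of the upstream file): the @func callback
 * lets the caller veto or claim the candidate task while the queue lock is
 * still held; returning false leaves the task asleep. The predicate below
 * is hypothetical and would be passed to rpc_wake_up_first() along with a
 * pointer to the caller's "ready" flag as @data.
 */
static bool example_wake_if_ready(struct rpc_task *task, void *data)
{
	bool *resource_ready = data;

	if (!*resource_ready)
		return false;	/* leave the task queued for a later attempt */
	task->tk_status = 0;	/* clear any stale error before it runs */
	return true;
}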

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
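
/*
 * Illustrative sketch (not part of the upstream file): rpc_delay() is the
 * usual way for an rpc_call_done callback to back off and retry when the
 * server asks it to try again later. rpc_restart_call() is the existing
 * helper from clnt.c; the callback and the error value chosen here are
 * hypothetical.
 */
static void example_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EAGAIN) {
		rpc_delay(task, 3 * HZ);	/* sleep on delay_queue for ~3s */
		task->tk_status = 0;		/* forget the transient error */
		rpc_restart_call(task);		/* then re-drive the request */
	}
}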

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that rpc_release_task() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing the warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
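
/*
 * Illustrative sketch (not part of the upstream file): the single region
 * handed out by rpc_malloc() is split so that the call (send) buffer sits
 * at the start and the reply buffer begins rq_callsize bytes in. A
 * hypothetical helper that sanity-checks that layout might look like this.
 */
static bool example_buffers_are_adjacent(const struct rpc_rqst *rqst)
{
	const char *call = rqst->rq_buffer;
	const char *reply = rqst->rq_rbuffer;

	return reply == call + rqst->rq_callsize;
}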

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
	return task;
}
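
/*
 * Illustrative sketch (not part of the upstream file): callers normally go
 * through rpc_run_task() in clnt.c, which fills in a struct rpc_task_setup,
 * calls rpc_new_task() and then rpc_execute(). A minimal synchronous use
 * with a hypothetical client, message and empty callback ops would look
 * roughly like this.
 */
static const struct rpc_call_ops example_call_ops = {
};

static int example_run_call(struct rpc_clnt *clnt, const struct rpc_message *msg)
{
	struct rpc_task_setup setup = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= &example_call_ops,
		.flags		= RPC_TASK_SOFT,
	};
	struct rpc_task *task;
	int status;

	task = rpc_run_task(&setup);	/* existing helper exported from clnt.c */
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;	/* sync task: result is ready on return */
	rpc_put_task(task);
	return status;
}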

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}