// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	timer_reduce(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(timeout - jiffies));

	task->tk_timeout = timeout;
	rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

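/*
 * Illustrative note (not upstream documentation): the two helpers above
 * keep one list entry per task owner on the queue proper, and chain any
 * further tasks from the same owner off that entry via u.tk_wait.links.
 * A chained task marks itself by setting u.tk_wait.list.prev to NULL and
 * caching the queue head in u.tk_wait.list.next, so a queue holding two
 * owners looks roughly like:
 *
 *	queue->tasks[p] --> taskA1 --> taskB1 --> (head)
 *	                      |
 *	                      +-- links --> taskA2 --> taskA3
 *
 * Dequeueing a head task (taskA1) moves the first of its linked tasks
 * (taskA2) back onto the queue list via the cached head pointer.
 */
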
/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the smp_rmb() in rpc_wake_up_task_on_wq_queue_action_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

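/*
 * Usage sketch (illustrative, mirroring the delay_queue above): a caller
 * embeds an rpc_wait_queue in its own structure and initializes it once
 * before use, e.g.
 *
 *	static struct rpc_wait_queue my_queue;
 *
 *	rpc_init_wait_queue(&my_queue, "myq");
 *	...
 *	rpc_destroy_wait_queue(&my_queue);	// quiesces the queue timer
 *
 * "my_queue" and "myq" are hypothetical names, not part of this file.
 */
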
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

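/*
 * Usage sketch (illustrative): a submitter that holds its own reference
 * to an async task can block until the task completes, e.g.
 *
 *	task = rpc_run_task(&setup);		// async task
 *	if (!IS_ERR(task)) {
 *		status = rpc_wait_for_completion_task(task);
 *		rpc_put_task(task);
 *	}
 *
 * rpc_wait_for_completion_task() is the inline wrapper declared in
 * include/linux/sunrpc/sched.h that passes a NULL action to
 * __rpc_wait_for_completion_task(); "setup" is caller state.
 */
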
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (time_is_after_jiffies(timeout)) {
		__rpc_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

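/*
 * Usage sketch (illustrative): a state-machine action typically parks the
 * task on a queue and nominates the next action to run on wakeup, e.g.
 *
 *	static void demo_action(struct rpc_task *task)
 *	{
 *		task->tk_action = demo_next_action;
 *		rpc_sleep_on(&some_queue, task, NULL);
 *	}
 *
 * and another context later calls
 *
 *	rpc_wake_up_queued_task(&some_queue, task);
 *
 * "demo_action", "demo_next_action" and "some_queue" are hypothetical.
 */
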
void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		/* Read barrier pairs with the smp_wmb() in __rpc_add_wait_queue() */
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock_bh(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

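/*
 * Usage sketch (illustrative): the predicate lets callers wake only a
 * task that satisfies some condition, e.g.
 *
 *	static bool demo_match_owner(struct rpc_task *task, void *data)
 *	{
 *		return task->tk_owner == *(pid_t *)data;
 *	}
 *
 *	woken = rpc_wake_up_first(&some_queue, demo_match_owner, &owner);
 *
 * Returning false from the predicate leaves the task queued.
 * "demo_match_owner" and "some_queue" are hypothetical.
 */
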
static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

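/*
 * Usage sketch (illustrative): rpc_delay() is the usual way for a state
 * machine to back off before retrying, e.g. from an action routine:
 *
 *	task->tk_action = demo_retry_action;
 *	rpc_delay(task, 3 * HZ);	// resume roughly 3 seconds from now
 *
 * "demo_retry_action" is hypothetical; __rpc_atrun() clears the
 * -ETIMEDOUT status so the retry starts with a clean slate.
 */
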
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

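/*
 * Usage sketch (illustrative): an rpc_call_done callback can restart the
 * call by resetting tk_action, which is why rpc_exit_task() re-checks it
 * after invoking the callback, e.g.
 *
 *	static void demo_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status == -EJUKEBOX) {
 *			rpc_restart_call(task);	// declared in sunrpc/clnt.h
 *			rpc_delay(task, 2 * HZ);
 *		}
 *	}
 *
 * "demo_call_done" is hypothetical.
 */
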
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task))
			rpc_exit(task, -ERESTARTSYS);

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We must therefore assume that the
		 * rpc_task pointer can no longer be safely dereferenced
		 * once the flag has been cleared.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that rpc_release_task() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing the warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

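/*
 * Layout sketch of the single allocation made above (illustrative):
 *
 *	struct rpc_buffer
 *	+-----------+---------------------------+---------------------+
 *	| len, data | call buffer (rq_callsize) | reply (rq_rcvsize)  |
 *	+-----------+---------------------------+---------------------+
 *	            ^ rq_buffer                 ^ rq_rbuffer
 */
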
/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
	return task;
}

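/*
 * Usage sketch (illustrative): callers normally go through rpc_run_task()
 * in clnt.c rather than driving this directly, roughly:
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &demo_ops,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *
 *	task = rpc_run_task(&setup);	// rpc_new_task() + rpc_execute()
 *	if (!IS_ERR(task))
 *		rpc_put_task(task);
 *
 * "clnt", "msg" and "demo_ops" are placeholders for caller state.
 */
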
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}