Lines matching references to "task" in net/sunrpc/sched.c (the Linux SUNRPC task scheduler)
45 static void rpc_release_task(struct rpc_task *task);
68 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status) in rpc_task_set_rpc_status() argument
70 if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0) in rpc_task_set_rpc_status()
76 rpc_task_timeout(const struct rpc_task *task) in rpc_task_timeout() argument
78 unsigned long timeout = READ_ONCE(task->tk_timeout); in rpc_task_timeout()
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
97 if (list_empty(&task->u.tk_wait.timer_list)) in __rpc_disable_timer()
99 task->tk_timeout = 0; in __rpc_disable_timer()
100 list_del(&task->u.tk_wait.timer_list); in __rpc_disable_timer()
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
124 task->tk_timeout = timeout; in __rpc_add_timer()
127 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
147 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) in __rpc_list_enqueue_task() argument
152 if (t->tk_owner == task->tk_owner) { in __rpc_list_enqueue_task()
153 list_add_tail(&task->u.tk_wait.links, in __rpc_list_enqueue_task()
156 task->u.tk_wait.list.next = q; in __rpc_list_enqueue_task()
157 task->u.tk_wait.list.prev = NULL; in __rpc_list_enqueue_task()
161 INIT_LIST_HEAD(&task->u.tk_wait.links); in __rpc_list_enqueue_task()
162 list_add_tail(&task->u.tk_wait.list, q); in __rpc_list_enqueue_task()
169 __rpc_list_dequeue_task(struct rpc_task *task) in __rpc_list_dequeue_task() argument
174 if (task->u.tk_wait.list.prev == NULL) { in __rpc_list_dequeue_task()
175 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
178 if (!list_empty(&task->u.tk_wait.links)) { in __rpc_list_dequeue_task()
179 t = list_first_entry(&task->u.tk_wait.links, in __rpc_list_dequeue_task()
185 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
187 list_del(&task->u.tk_wait.list); in __rpc_list_dequeue_task()
194 struct rpc_task *task, in __rpc_add_wait_queue_priority() argument
199 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); in __rpc_add_wait_queue_priority()
206 struct rpc_task *task, in __rpc_add_wait_queue() argument
209 INIT_LIST_HEAD(&task->u.tk_wait.timer_list); in __rpc_add_wait_queue()
211 __rpc_add_wait_queue_priority(queue, task, queue_priority); in __rpc_add_wait_queue()
213 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); in __rpc_add_wait_queue()
214 task->tk_waitqueue = queue; in __rpc_add_wait_queue()
218 rpc_set_queued(task); in __rpc_add_wait_queue()
224 static void __rpc_remove_wait_queue_priority(struct rpc_task *task) in __rpc_remove_wait_queue_priority() argument
226 __rpc_list_dequeue_task(task); in __rpc_remove_wait_queue_priority()
233 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_remove_wait_queue() argument
235 __rpc_disable_timer(queue, task); in __rpc_remove_wait_queue()
237 __rpc_remove_wait_queue_priority(task); in __rpc_remove_wait_queue()
239 list_del(&task->u.tk_wait.list); in __rpc_remove_wait_queue()
286 static void rpc_task_set_debuginfo(struct rpc_task *task) in rpc_task_set_debuginfo() argument
288 struct rpc_clnt *clnt = task->tk_client; in rpc_task_set_debuginfo()
294 task->tk_pid = atomic_inc_return(&rpc_pid); in rpc_task_set_debuginfo()
298 task->tk_pid = atomic_inc_return(&clnt->cl_pid); in rpc_task_set_debuginfo()
301 static inline void rpc_task_set_debuginfo(struct rpc_task *task) in rpc_task_set_debuginfo() argument
306 static void rpc_set_active(struct rpc_task *task) in rpc_set_active() argument
308 rpc_task_set_debuginfo(task); in rpc_set_active()
309 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_set_active()
310 trace_rpc_task_begin(task, NULL); in rpc_set_active()
317 static int rpc_complete_task(struct rpc_task *task) in rpc_complete_task() argument
319 void *m = &task->tk_runstate; in rpc_complete_task()
325 trace_rpc_task_complete(task, NULL); in rpc_complete_task()
328 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_complete_task()
329 ret = atomic_dec_and_test(&task->tk_count); in rpc_complete_task()
343 int rpc_wait_for_completion_task(struct rpc_task *task) in rpc_wait_for_completion_task() argument
345 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, in rpc_wait_for_completion_task()
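The RPC_TASK_ACTIVE bit set in rpc_set_active() and cleared in rpc_complete_task() above is what rpc_wait_for_completion_task() sleeps on. A minimal sketch of the usual caller pattern, assuming a task obtained from rpc_run_task() in net/sunrpc/clnt.c and typically started with RPC_TASK_ASYNC; the demo_* name is hypothetical:

	static int demo_run_and_wait(const struct rpc_task_setup *setup)
	{
		struct rpc_task *task;
		int status;

		task = rpc_run_task(setup);		/* returns with a reference held for us */
		if (IS_ERR(task))
			return PTR_ERR(task);

		/* Sleep until rpc_release_task() -> rpc_complete_task() clears RPC_TASK_ACTIVE. */
		status = rpc_wait_for_completion_task(task);
		if (status == 0)
			status = task->tk_status;	/* final RPC result */

		rpc_put_task(task);			/* drop our reference */
		return status;
	}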
362 struct rpc_task *task) in rpc_make_runnable() argument
364 bool need_wakeup = !rpc_test_and_set_running(task); in rpc_make_runnable()
366 rpc_clear_queued(task); in rpc_make_runnable()
369 if (RPC_IS_ASYNC(task)) { in rpc_make_runnable()
370 INIT_WORK(&task->u.tk_work, rpc_async_schedule); in rpc_make_runnable()
371 queue_work(wq, &task->u.tk_work); in rpc_make_runnable()
374 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); in rpc_make_runnable()
385 struct rpc_task *task, in __rpc_do_sleep_on_priority() argument
388 trace_rpc_task_sleep(task, q); in __rpc_do_sleep_on_priority()
390 __rpc_add_wait_queue(q, task, queue_priority); in __rpc_do_sleep_on_priority()
394 struct rpc_task *task, in __rpc_sleep_on_priority() argument
397 if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) in __rpc_sleep_on_priority()
399 __rpc_do_sleep_on_priority(q, task, queue_priority); in __rpc_sleep_on_priority()
403 struct rpc_task *task, unsigned long timeout, in __rpc_sleep_on_priority_timeout() argument
406 if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) in __rpc_sleep_on_priority_timeout()
409 __rpc_do_sleep_on_priority(q, task, queue_priority); in __rpc_sleep_on_priority_timeout()
410 __rpc_add_timer(q, task, timeout); in __rpc_sleep_on_priority_timeout()
412 task->tk_status = -ETIMEDOUT; in __rpc_sleep_on_priority_timeout()
415 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action) in rpc_set_tk_callback() argument
417 if (action && !WARN_ON_ONCE(task->tk_callback != NULL)) in rpc_set_tk_callback()
418 task->tk_callback = action; in rpc_set_tk_callback()
421 static bool rpc_sleep_check_activated(struct rpc_task *task) in rpc_sleep_check_activated() argument
424 if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) { in rpc_sleep_check_activated()
425 task->tk_status = -EIO; in rpc_sleep_check_activated()
426 rpc_put_task_async(task); in rpc_sleep_check_activated()
432 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on_timeout() argument
435 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_timeout()
438 rpc_set_tk_callback(task, action); in rpc_sleep_on_timeout()
444 __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority); in rpc_sleep_on_timeout()
449 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on() argument
452 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on()
455 rpc_set_tk_callback(task, action); in rpc_sleep_on()
457 WARN_ON_ONCE(task->tk_timeout != 0); in rpc_sleep_on()
462 __rpc_sleep_on_priority(q, task, task->tk_priority); in rpc_sleep_on()
468 struct rpc_task *task, unsigned long timeout, int priority) in rpc_sleep_on_priority_timeout() argument
470 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_priority_timeout()
478 __rpc_sleep_on_priority_timeout(q, task, timeout, priority); in rpc_sleep_on_priority_timeout()
483 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on_priority() argument
486 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_priority()
489 WARN_ON_ONCE(task->tk_timeout != 0); in rpc_sleep_on_priority()
495 __rpc_sleep_on_priority(q, task, priority); in rpc_sleep_on_priority()
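Taken together, the rpc_sleep_on*() variants above park a task on a wait queue, record the action as tk_callback to run first on wake-up, and optionally arm a timeout. A minimal sketch of a tk_action that waits for a driver-private resource; the demo_* names and the availability check are hypothetical, and the queue is assumed to have been initialised elsewhere with rpc_init_wait_queue():

	static struct rpc_wait_queue demo_waitq;	/* rpc_init_wait_queue(&demo_waitq, "demo") at setup time */

	static void demo_obtain_slot(struct rpc_task *task);

	static void demo_wait_for_slot(struct rpc_task *task)
	{
		if (!demo_slot_available()) {		/* hypothetical predicate */
			/* Park the task; demo_obtain_slot runs as tk_callback on wake-up. */
			rpc_sleep_on(&demo_waitq, task, demo_obtain_slot);
			return;
		}
		task->tk_action = demo_obtain_slot;
	}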
510 struct rpc_task *task) in __rpc_do_wake_up_task_on_wq() argument
513 if (!RPC_IS_ACTIVATED(task)) { in __rpc_do_wake_up_task_on_wq()
514 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); in __rpc_do_wake_up_task_on_wq()
518 trace_rpc_task_wakeup(task, queue); in __rpc_do_wake_up_task_on_wq()
520 __rpc_remove_wait_queue(queue, task); in __rpc_do_wake_up_task_on_wq()
522 rpc_make_runnable(wq, task); in __rpc_do_wake_up_task_on_wq()
530 struct rpc_wait_queue *queue, struct rpc_task *task, in rpc_wake_up_task_on_wq_queue_action_locked() argument
533 if (RPC_IS_QUEUED(task)) { in rpc_wake_up_task_on_wq_queue_action_locked()
535 if (task->tk_waitqueue == queue) { in rpc_wake_up_task_on_wq_queue_action_locked()
536 if (action == NULL || action(task, data)) { in rpc_wake_up_task_on_wq_queue_action_locked()
537 __rpc_do_wake_up_task_on_wq(wq, queue, task); in rpc_wake_up_task_on_wq_queue_action_locked()
538 return task; in rpc_wake_up_task_on_wq_queue_action_locked()
549 struct rpc_task *task) in rpc_wake_up_task_queue_locked() argument
552 task, NULL, NULL); in rpc_wake_up_task_queue_locked()
558 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) in rpc_wake_up_queued_task() argument
560 if (!RPC_IS_QUEUED(task)) in rpc_wake_up_queued_task()
563 rpc_wake_up_task_queue_locked(queue, task); in rpc_wake_up_queued_task()
568 static bool rpc_task_action_set_status(struct rpc_task *task, void *status) in rpc_task_action_set_status() argument
570 task->tk_status = *(int *)status; in rpc_task_action_set_status()
576 struct rpc_task *task, int status) in rpc_wake_up_task_queue_set_status_locked() argument
579 task, rpc_task_action_set_status, &status); in rpc_wake_up_task_queue_set_status_locked()
593 struct rpc_task *task, int status) in rpc_wake_up_queued_task_set_status() argument
595 if (!RPC_IS_QUEUED(task)) in rpc_wake_up_queued_task_set_status()
598 rpc_wake_up_task_queue_set_status_locked(queue, task, status); in rpc_wake_up_queued_task_set_status()
608 struct rpc_task *task; in __rpc_find_next_queued_priority() local
615 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
625 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
638 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
649 return task; in __rpc_find_next_queued_priority()
668 struct rpc_task *task = NULL; in rpc_wake_up_first_on_wq() local
671 task = __rpc_find_next_queued(queue); in rpc_wake_up_first_on_wq()
672 if (task != NULL) in rpc_wake_up_first_on_wq()
673 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, in rpc_wake_up_first_on_wq()
674 task, func, data); in rpc_wake_up_first_on_wq()
677 return task; in rpc_wake_up_first_on_wq()
690 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) in rpc_wake_up_next_func() argument
711 struct rpc_task *task; in rpc_wake_up_locked() local
714 task = __rpc_find_next_queued(queue); in rpc_wake_up_locked()
715 if (task == NULL) in rpc_wake_up_locked()
717 rpc_wake_up_task_queue_locked(queue, task); in rpc_wake_up_locked()
742 struct rpc_task *task; in rpc_wake_up_status_locked() local
745 task = __rpc_find_next_queued(queue); in rpc_wake_up_status_locked()
746 if (task == NULL) in rpc_wake_up_status_locked()
748 rpc_wake_up_task_queue_set_status_locked(queue, task, status); in rpc_wake_up_status_locked()
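On the producer side, the queue owner wakes waiters with the rpc_wake_up*() helpers above. Continuing the hypothetical demo_waitq sketch: wake the next waiter when a slot frees up, or fail everything still queued at shutdown:

	static void demo_slot_released(void)
	{
		/* Wake the next (highest-priority) waiter; it resumes via its tk_callback. */
		rpc_wake_up_next(&demo_waitq);
	}

	static void demo_shutdown(void)
	{
		/* Set tk_status on every queued task and make them all runnable. */
		rpc_wake_up_status(&demo_waitq, -ESHUTDOWN);
	}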
772 struct rpc_task *task, *n; in __rpc_queue_timer_fn() local
777 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { in __rpc_queue_timer_fn()
778 timeo = task->tk_timeout; in __rpc_queue_timer_fn()
780 trace_rpc_task_timeout(task, task->tk_action); in __rpc_queue_timer_fn()
781 task->tk_status = -ETIMEDOUT; in __rpc_queue_timer_fn()
782 rpc_wake_up_task_queue_locked(queue, task); in __rpc_queue_timer_fn()
793 static void __rpc_atrun(struct rpc_task *task) in __rpc_atrun() argument
795 if (task->tk_status == -ETIMEDOUT) in __rpc_atrun()
796 task->tk_status = 0; in __rpc_atrun()
802 void rpc_delay(struct rpc_task *task, unsigned long delay) in rpc_delay() argument
804 rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay); in rpc_delay()
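rpc_delay() is the usual back-off primitive: it sleeps the task on the file-private delay_queue, and __rpc_atrun() clears the -ETIMEDOUT status the timer would otherwise leave behind. A minimal sketch of a transient-error retry from a ->rpc_call_done callback; rpc_restart_call() is defined in net/sunrpc/clnt.c, and demo_call_done is hypothetical:

	static void demo_call_done(struct rpc_task *task, void *calldata)
	{
		if (task->tk_status == -EAGAIN) {
			rpc_delay(task, HZ);	/* back off roughly one second on delay_queue */
			rpc_restart_call(task);	/* leaves tk_action non-NULL, so rpc_exit_task() re-drives the call */
		}
	}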
811 void rpc_prepare_task(struct rpc_task *task) in rpc_prepare_task() argument
813 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); in rpc_prepare_task()
817 rpc_init_task_statistics(struct rpc_task *task) in rpc_init_task_statistics() argument
820 task->tk_garb_retry = 2; in rpc_init_task_statistics()
821 task->tk_cred_retry = 2; in rpc_init_task_statistics()
824 task->tk_start = ktime_get(); in rpc_init_task_statistics()
828 rpc_reset_task_statistics(struct rpc_task *task) in rpc_reset_task_statistics() argument
830 task->tk_timeouts = 0; in rpc_reset_task_statistics()
831 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT); in rpc_reset_task_statistics()
832 rpc_init_task_statistics(task); in rpc_reset_task_statistics()
838 void rpc_exit_task(struct rpc_task *task) in rpc_exit_task() argument
840 trace_rpc_task_end(task, task->tk_action); in rpc_exit_task()
841 task->tk_action = NULL; in rpc_exit_task()
842 if (task->tk_ops->rpc_count_stats) in rpc_exit_task()
843 task->tk_ops->rpc_count_stats(task, task->tk_calldata); in rpc_exit_task()
844 else if (task->tk_client) in rpc_exit_task()
845 rpc_count_iostats(task, task->tk_client->cl_metrics); in rpc_exit_task()
846 if (task->tk_ops->rpc_call_done != NULL) { in rpc_exit_task()
847 trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done); in rpc_exit_task()
848 task->tk_ops->rpc_call_done(task, task->tk_calldata); in rpc_exit_task()
849 if (task->tk_action != NULL) { in rpc_exit_task()
851 xprt_release(task); in rpc_exit_task()
852 rpc_reset_task_statistics(task); in rpc_exit_task()
857 void rpc_signal_task(struct rpc_task *task) in rpc_signal_task() argument
861 if (!RPC_IS_ACTIVATED(task)) in rpc_signal_task()
864 if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) in rpc_signal_task()
866 trace_rpc_task_signalled(task, task->tk_action); in rpc_signal_task()
867 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); in rpc_signal_task()
869 queue = READ_ONCE(task->tk_waitqueue); in rpc_signal_task()
871 rpc_wake_up_queued_task(queue, task); in rpc_signal_task()
874 void rpc_task_try_cancel(struct rpc_task *task, int error) in rpc_task_try_cancel() argument
878 if (!rpc_task_set_rpc_status(task, error)) in rpc_task_try_cancel()
880 queue = READ_ONCE(task->tk_waitqueue); in rpc_task_try_cancel()
882 rpc_wake_up_queued_task(queue, task); in rpc_task_try_cancel()
885 void rpc_exit(struct rpc_task *task, int status) in rpc_exit() argument
887 task->tk_status = status; in rpc_exit()
888 task->tk_action = rpc_exit_task; in rpc_exit()
889 rpc_wake_up_queued_task(task->tk_waitqueue, task); in rpc_exit()
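rpc_exit() is how a state-machine step aborts the call: it records the status, points tk_action at rpc_exit_task(), and wakes the task if it is queued. A minimal sketch with hypothetical demo_* names:

	static void demo_next_step(struct rpc_task *task);

	static void demo_validate(struct rpc_task *task)
	{
		if (demo_fatal_error(task)) {	/* hypothetical check */
			rpc_exit(task, -EIO);	/* rpc_exit_task() runs on the next loop iteration */
			return;
		}
		task->tk_action = demo_next_step;
	}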
911 static void __rpc_execute(struct rpc_task *task) in __rpc_execute() argument
914 int task_is_async = RPC_IS_ASYNC(task); in __rpc_execute()
918 WARN_ON_ONCE(RPC_IS_QUEUED(task)); in __rpc_execute()
919 if (RPC_IS_QUEUED(task)) in __rpc_execute()
930 do_action = task->tk_action; in __rpc_execute()
933 (status = READ_ONCE(task->tk_rpc_status)) != 0) { in __rpc_execute()
934 task->tk_status = status; in __rpc_execute()
938 if (task->tk_callback) { in __rpc_execute()
939 do_action = task->tk_callback; in __rpc_execute()
940 task->tk_callback = NULL; in __rpc_execute()
944 if (RPC_IS_SWAPPER(task) || in __rpc_execute()
945 xprt_needs_memalloc(task->tk_xprt, task)) in __rpc_execute()
948 trace_rpc_task_run_action(task, do_action); in __rpc_execute()
949 do_action(task); in __rpc_execute()
954 if (!RPC_IS_QUEUED(task)) { in __rpc_execute()
968 queue = task->tk_waitqueue; in __rpc_execute()
970 if (!RPC_IS_QUEUED(task)) { in __rpc_execute()
975 if (READ_ONCE(task->tk_rpc_status) != 0) { in __rpc_execute()
976 rpc_wake_up_task_queue_locked(queue, task); in __rpc_execute()
980 rpc_clear_running(task); in __rpc_execute()
986 trace_rpc_task_sync_sleep(task, task->tk_action); in __rpc_execute()
987 status = out_of_line_wait_on_bit(&task->tk_runstate, in __rpc_execute()
997 rpc_signal_task(task); in __rpc_execute()
999 trace_rpc_task_sync_wake(task, task->tk_action); in __rpc_execute()
1003 rpc_release_task(task); in __rpc_execute()
1017 void rpc_execute(struct rpc_task *task) in rpc_execute() argument
1019 bool is_async = RPC_IS_ASYNC(task); in rpc_execute()
1021 rpc_set_active(task); in rpc_execute()
1022 rpc_make_runnable(rpciod_workqueue, task); in rpc_execute()
1025 __rpc_execute(task); in rpc_execute()
1055 int rpc_malloc(struct rpc_task *task) in rpc_malloc() argument
1057 struct rpc_rqst *rqst = task->tk_rqstp; in rpc_malloc()
1066 if (!buf && RPC_IS_ASYNC(task)) in rpc_malloc()
1086 void rpc_free(struct rpc_task *task) in rpc_free() argument
1088 void *buffer = task->tk_rqstp->rq_buffer; in rpc_free()
1105 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) in rpc_init_task() argument
1107 memset(task, 0, sizeof(*task)); in rpc_init_task()
1108 atomic_set(&task->tk_count, 1); in rpc_init_task()
1109 task->tk_flags = task_setup_data->flags; in rpc_init_task()
1110 task->tk_ops = task_setup_data->callback_ops; in rpc_init_task()
1111 task->tk_calldata = task_setup_data->callback_data; in rpc_init_task()
1112 INIT_LIST_HEAD(&task->tk_task); in rpc_init_task()
1114 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; in rpc_init_task()
1115 task->tk_owner = current->tgid; in rpc_init_task()
1118 task->tk_workqueue = task_setup_data->workqueue; in rpc_init_task()
1120 task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client, in rpc_init_task()
1123 task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred); in rpc_init_task()
1125 if (task->tk_ops->rpc_call_prepare != NULL) in rpc_init_task()
1126 task->tk_action = rpc_prepare_task; in rpc_init_task()
1128 rpc_init_task_statistics(task); in rpc_init_task()
1133 struct rpc_task *task; in rpc_alloc_task() local
1135 task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask()); in rpc_alloc_task()
1136 if (task) in rpc_alloc_task()
1137 return task; in rpc_alloc_task()
1146 struct rpc_task *task = setup_data->task; in rpc_new_task() local
1149 if (task == NULL) { in rpc_new_task()
1150 task = rpc_alloc_task(); in rpc_new_task()
1151 if (task == NULL) { in rpc_new_task()
1159 rpc_init_task(task, setup_data); in rpc_new_task()
1160 task->tk_flags |= flags; in rpc_new_task()
1161 return task; in rpc_new_task()
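rpc_new_task() is normally reached through rpc_run_task() in net/sunrpc/clnt.c, which passes down the struct rpc_task_setup fields consumed by rpc_init_task() above. A minimal sketch of the common asynchronous submission path; the demo_* handlers, client and message are hypothetical:

	/* Hypothetical completion handlers, defined elsewhere. */
	static void demo_prepare(struct rpc_task *task, void *calldata);
	static void demo_call_done(struct rpc_task *task, void *calldata);
	static void demo_release(void *calldata);

	static const struct rpc_call_ops demo_call_ops = {
		.rpc_call_prepare = demo_prepare,	/* called from rpc_prepare_task(), the first tk_action */
		.rpc_call_done	  = demo_call_done,
		.rpc_release	  = demo_release,	/* frees callback_data */
	};

	static int demo_submit(struct rpc_clnt *clnt, struct rpc_message *msg, void *calldata)
	{
		struct rpc_task_setup task_setup = {
			.rpc_client	= clnt,
			.rpc_message	= msg,
			.callback_ops	= &demo_call_ops,
			.callback_data	= calldata,
			.flags		= RPC_TASK_ASYNC,
		};
		struct rpc_task *task;

		task = rpc_run_task(&task_setup);
		if (IS_ERR(task))
			return PTR_ERR(task);
		rpc_put_task(task);			/* keep only the running task's reference */
		return 0;
	}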
1183 static void rpc_free_task(struct rpc_task *task) in rpc_free_task() argument
1185 unsigned short tk_flags = task->tk_flags; in rpc_free_task()
1187 put_rpccred(task->tk_op_cred); in rpc_free_task()
1188 rpc_release_calldata(task->tk_ops, task->tk_calldata); in rpc_free_task()
1191 mempool_free(task, rpc_task_mempool); in rpc_free_task()
1202 static void rpc_release_resources_task(struct rpc_task *task) in rpc_release_resources_task() argument
1204 xprt_release(task); in rpc_release_resources_task()
1205 if (task->tk_msg.rpc_cred) { in rpc_release_resources_task()
1206 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) in rpc_release_resources_task()
1207 put_cred(task->tk_msg.rpc_cred); in rpc_release_resources_task()
1208 task->tk_msg.rpc_cred = NULL; in rpc_release_resources_task()
1210 rpc_task_release_client(task); in rpc_release_resources_task()
1213 static void rpc_final_put_task(struct rpc_task *task, in rpc_final_put_task() argument
1217 INIT_WORK(&task->u.tk_work, rpc_async_release); in rpc_final_put_task()
1218 queue_work(q, &task->u.tk_work); in rpc_final_put_task()
1220 rpc_free_task(task); in rpc_final_put_task()
1223 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) in rpc_do_put_task() argument
1225 if (atomic_dec_and_test(&task->tk_count)) { in rpc_do_put_task()
1226 rpc_release_resources_task(task); in rpc_do_put_task()
1227 rpc_final_put_task(task, q); in rpc_do_put_task()
1231 void rpc_put_task(struct rpc_task *task) in rpc_put_task() argument
1233 rpc_do_put_task(task, NULL); in rpc_put_task()
1237 void rpc_put_task_async(struct rpc_task *task) in rpc_put_task_async() argument
1239 rpc_do_put_task(task, task->tk_workqueue); in rpc_put_task_async()
1243 static void rpc_release_task(struct rpc_task *task) in rpc_release_task() argument
1245 WARN_ON_ONCE(RPC_IS_QUEUED(task)); in rpc_release_task()
1247 rpc_release_resources_task(task); in rpc_release_task()
1254 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { in rpc_release_task()
1256 if (!rpc_complete_task(task)) in rpc_release_task()
1259 if (!atomic_dec_and_test(&task->tk_count)) in rpc_release_task()
1262 rpc_final_put_task(task, task->tk_workqueue); in rpc_release_task()