// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Must be called with
 * queue->lock held in order to avoid races with __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}
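/*
 * How the queue timer works: each wait queue owns a single delayed
 * work item, kept armed for the earliest tk_timeout of any task on
 * queue->timer_list.list.  When it fires, __rpc_queue_timer_fn()
 * wakes every expired task with -ETIMEDOUT and re-arms the work item
 * for the next pending expiry.
 */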
/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) ||
	    time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}
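/*
 * Sketch of the resulting layout (illustrative): tasks that share a
 * tk_owner hang off the first such task via u.tk_wait.links, and only
 * that first task sits on the queue list itself:
 *
 *	queue->tasks[p] <-> T1 <-> T2 <-> ...		(u.tk_wait.list)
 *			     |
 *			    T1a <-> T1b			(u.tk_wait.links)
 *
 * For T1a and T1b, u.tk_wait.list.next caches the queue head and
 * u.tk_wait.list.prev is NULL; __rpc_list_dequeue_task() relies on
 * exactly this encoding to tell the two cases apart.
 */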
/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
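/*
 * Illustrative queue lifecycle (a sketch, not taken from this file):
 * owners embed a struct rpc_wait_queue, initialise it once, and tear
 * it down when done so the queue timer is reliably cancelled:
 *
 *	struct rpc_wait_queue events;		// hypothetical queue
 *
 *	rpc_init_wait_queue(&events, "events");
 *	...
 *	rpc_destroy_wait_queue(&events);
 */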
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
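/*
 * Illustrative sleep/wake pairing (a sketch, not taken from this
 * file): a transport typically puts a task to sleep on one of its
 * queues and wakes it from another context once the blocking
 * condition clears:
 *
 *	rpc_sleep_on(&xprt->pending, task, NULL);
 *	...
 *	// e.g. from the reply handling path:
 *	rpc_wake_up_queued_task(&xprt->pending, task);
 */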
void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		/* Read barrier pairs with the smp_wmb() in __rpc_add_wait_queue() */
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}
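/*
 * Scheduling note: __rpc_find_next_queued_priority() below serves up
 * to queue->nr tasks (1 << priority, set by
 * rpc_set_waitqueue_priority()) from the current priority level
 * before rotating to the next one, wrapping from tasks[0] back to
 * tasks[maxpriority], so no level can starve the others indefinitely.
 */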
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}
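/*
 * Illustrative rpc_wake_up_first() usage (a sketch, not taken from
 * this file): the callback decides whether the candidate task may be
 * woken, for example only when it can be handed some resource:
 *
 *	static bool grant_slot(struct rpc_task *task, void *data)
 *	{
 *		return try_grant(data);		// hypothetical predicate
 *	}
 *	...
 *	rpc_wake_up_first(&some_queue, grant_slot, resource);
 */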
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
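/*
 * Illustrative backoff (a sketch, not taken from this file): a state
 * routine that hits a transient error typically re-queues itself with
 * a delay before retrying:
 *
 *	task->tk_action = call_retry;	// hypothetical next state
 *	rpc_delay(task, 3 * HZ);	// resume in about 3 seconds
 */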
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}
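/*
 * Illustrative tk_action sequence (simplified; the state routines
 * live in net/sunrpc/clnt.c): each step either sets tk_action to the
 * next step or sleeps on a wait queue, and a NULL tk_action ends the
 * loop in __rpc_execute() below:
 *
 *	rpc_prepare_task -> call_start -> call_reserve -> ...
 *		-> call_transmit -> call_status -> call_decode
 *		-> rpc_exit_task (sets tk_action to NULL)
 */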
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer can still be safely dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			trace_rpc_task_signalled(task, task->tk_action);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}
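/*
 * Layout of the buffer set up by rpc_malloc() below (sketch):
 *
 *	+-------------------+---------------------+---------------------+
 *	| struct rpc_buffer | call buffer         | reply buffer        |
 *	| (len header)      | rq_callsize bytes   | rq_rcvsize bytes    |
 *	+-------------------+---------------------+---------------------+
 *	^ buf               ^ rqst->rq_buffer     ^ rqst->rq_rbuffer
 */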
/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}
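/*
 * Illustrative task creation (a sketch, not taken from this file):
 * most callers reach rpc_new_task() through rpc_run_task() in
 * net/sunrpc/clnt.c, which boils down to this pattern:
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.callback_ops	= &my_call_ops,	// hypothetical rpc_call_ops
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_new_task(&setup);
 *
 *	rpc_execute(task);
 *	rpc_put_task(task);
 */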
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}
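/*
 * Pairing note (an assumption based on callers elsewhere in sunrpc):
 * rpciod_up()/rpciod_down() act as a module reference pair, e.g.
 * taken when an rpc_clnt is created and dropped when it is destroyed,
 * so this module cannot be unloaded while such users exist.
 */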
/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}