// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t		*rpc_task_mempool __read_mostly;
static mempool_t		*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with the
 * queue->lock held in order to avoid races with __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	dprintk("RPC: %5u setting alarm for %u ms\n",
			task->tk_pid, jiffies_to_msecs(timeout - jiffies));

	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}
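
/*
 * Illustrative sketch (not built): how a subsystem might create and tear
 * down a private rpc_wait_queue with the helpers above.  The "frob_*"
 * names are hypothetical; the pattern mirrors how other sunrpc users of
 * rpc_init_wait_queue()/rpc_destroy_wait_queue() behave.
 */
#if 0
static struct rpc_wait_queue frob_backlog_queue;

static void frob_setup(void)
{
	/* A plain (non-priority) queue: every task lands in tasks[0]. */
	rpc_init_wait_queue(&frob_backlog_queue, "frob_backlog");
}

static void frob_teardown(void)
{
	/* Fail anything still parked on the queue, then make sure the
	 * queue's deferred timer work can no longer run. */
	rpc_wake_up_status(&frob_backlog_queue, -ESHUTDOWN);
	rpc_destroy_wait_queue(&frob_backlog_queue);
}
#endif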
/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
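
/*
 * Illustrative sketch (not built): waiting for an asynchronous task to
 * complete.  A caller that wants to look at tk_status afterwards must hold
 * its own reference on the task, so the scheduler's final put cannot free
 * it first.  "frob_run_and_wait" is a hypothetical helper; the pattern
 * mirrors what callers of rpc_run_task() do before waiting for completion.
 */
#if 0
static int frob_run_and_wait(struct rpc_task *task)
{
	int status;

	atomic_inc(&task->tk_count);	/* reference for this caller */
	rpc_execute(task);

	/* Sleep (killably) until RPC_TASK_ACTIVE is cleared by
	 * rpc_complete_task(). */
	status = __rpc_wait_for_completion_task(task, NULL);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);		/* drop our reference */
	return status;
}
#endif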
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (time_is_after_jiffies(timeout)) {
		__rpc_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
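
/*
 * Illustrative sketch (not built): a typical state-machine step that parks
 * its task on a wait queue.  The step records which routine should run on
 * wakeup in tk_action, calls rpc_sleep_on(), and returns; some other context
 * later wakes the task with rpc_wake_up_next() or rpc_wake_up_queued_task().
 * All "frob_*" names and the frob_state structure are hypothetical.
 */
#if 0
struct frob_state {
	struct rpc_wait_queue	backlog;
	bool			slot_available;
};

static void frob_call_transmit(struct rpc_task *task);

static void frob_call_reserve(struct rpc_task *task)
{
	struct frob_state *frob = task->tk_calldata;

	if (!frob->slot_available) {
		/* Re-run this step once a slot is released. */
		task->tk_action = frob_call_reserve;
		rpc_sleep_on(&frob->backlog, task, NULL);
		return;
	}
	task->tk_action = frob_call_transmit;
}
#endif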
/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		/* Pairs with the smp_wmb() in __rpc_add_wait_queue() */
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
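
/*
 * Illustrative sketch (not built): rpc_wake_up_first() with a caller-supplied
 * predicate.  Only the first queued task for which the predicate returns true
 * is dequeued and made runnable; queue->lock is taken and released internally
 * and the woken task (or NULL) is returned.  The "frob_*" names are
 * hypothetical.
 */
#if 0
static bool frob_match_calldata(struct rpc_task *task, void *data)
{
	return task->tk_calldata == data;
}

static void frob_kick_one(struct rpc_wait_queue *queue, void *calldata)
{
	struct rpc_task *task;

	task = rpc_wake_up_first(queue, frob_match_calldata, calldata);
	if (task == NULL)
		pr_debug("no queued task matched %p\n", calldata);
}
#endif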
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}
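
/*
 * Illustrative sketch (not built): restarting a call from an rpc_call_done
 * callback by combining rpc_restart_call() with rpc_delay().  If the done
 * callback leaves tk_action non-NULL, rpc_exit_task() above releases the
 * transport slot and resets the statistics, and the state machine re-runs
 * the call once the delay expires.  The "frob_*" name and the -EAGAIN
 * condition are hypothetical stand-ins.
 */
#if 0
static void frob_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EAGAIN) {
		/* Retry the whole call after a short back-off. */
		rpc_restart_call(task);
		rpc_delay(task, HZ >> 2);
		return;
	}
	/* ... normal completion handling ... */
}
#endif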
void
rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task))
			rpc_exit(task, -ERESTARTSYS);

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer can still safely be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that rpc_release_task() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing the warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
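
/*
 * Illustrative sketch (not built): the layout produced by rpc_malloc() and
 * how rpc_free() gets back to the enclosing header.  rq_buffer points just
 * past the struct rpc_buffer header; the call area (rq_callsize bytes) is
 * followed immediately by the reply area (rq_rcvsize bytes, rq_rbuffer).
 * "frob_buffer_of" is a hypothetical helper mirroring what rpc_free() does.
 */
#if 0
static struct rpc_buffer *frob_buffer_of(struct rpc_rqst *rqst)
{
	/* Recover the header (and hence the total length) from rq_buffer,
	 * exactly as rpc_free() does. */
	return container_of(rqst->rq_buffer, struct rpc_buffer, data);
}
#endif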
/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
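
/*
 * Illustrative sketch (not built): typical task lifetime as seen by a caller.
 * rpc_init_task() starts the task with a reference count of 1; a caller that
 * wants to inspect the result after execution holds its own reference and
 * drops it with rpc_put_task().  The sketch assumes the rpc_run_task()
 * helper from clnt.c and uses hypothetical "frob_*" names.
 */
#if 0
static int frob_do_call(struct rpc_clnt *clnt, struct rpc_message *msg,
		const struct rpc_call_ops *ops, void *calldata)
{
	struct rpc_task_setup setup = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= ops,
		.callback_data	= calldata,
	};
	struct rpc_task *task;
	int status;

	/* Runs the task to completion here unless RPC_TASK_ASYNC is set. */
	task = rpc_run_task(&setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);	/* drop our reference; may free the task */
	return status;
}
#endif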
static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod and xprtiod workqueues.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
			sizeof(struct rpc_task),
			0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
			RPC_BUFFER_MAXSIZE,
			0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
			rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
			rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}