/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

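/*
 * Wake up @task if it is queued on @queue, setting task->tk_status to
 * @status before it runs. Caller must hold queue->lock.
 */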
static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock_bh(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

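	/* Optional operation-specific credential supplied by the caller */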
	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}