/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 *  Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
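/*
 * Layout note (a descriptive comment added for clarity): each
 * queue->tasks[priority] list holds at most one rpc_task per distinct
 * tk_owner; further tasks from the same owner are chained off that
 * first task's u.tk_wait.links list.  Removal promotes the next
 * chained task into the departing task's slot, so per-owner FIFO
 * order is preserved (see __rpc_remove_wait_queue_priority() below).
 */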
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
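/*
 * Example of initialising and tearing down a wait queue (an
 * illustrative sketch only; "frob_queue" is a hypothetical
 * caller-owned queue, not something defined in this file):
 *
 *	static struct rpc_wait_queue frob_queue;
 *
 *	rpc_init_wait_queue(&frob_queue, "frobq");
 *	...
 *	rpc_destroy_wait_queue(&frob_queue);
 *
 * rpc_destroy_wait_queue() only quiesces the queue timer; the caller
 * is responsible for ensuring the queue is empty before teardown.
 */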
static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	rpc_task_set_debuginfo(task);
	/* Add to global list of all tasks */
	clnt = task->tk_client;
	if (clnt != NULL) {
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	/* We might have raced */
	if (RPC_IS_QUEUED(task)) {
		rpc_clear_running(task);
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
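/*
 * Example of the typical sleep/wake pairing (an illustrative sketch
 * only; "frob_queue" and frob_event_done() are hypothetical names).
 * A tk_action state routine parks the task, optionally arming a
 * timeout by setting tk_timeout first:
 *
 *	task->tk_timeout = 5 * HZ;
 *	rpc_sleep_on(&frob_queue, task, NULL);
 *
 * and the event side, which may run in bh context, later wakes it:
 *
 *	if (frob_event_done())
 *		rpc_wake_up_queued_task(&frob_queue, task);
 *
 * A task woken by the queue timer instead resumes with tk_status set
 * to -ETIMEDOUT (see __rpc_queue_timer_fn() below).
 */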
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the specified task
 */
static void rpc_wake_up_task(struct rpc_task *task)
{
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}
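/*
 * Scheduling summary for the priority dequeue above (a descriptive
 * comment added for clarity): up to RPC_BATCH_COUNT tasks are served
 * from the current owner before ownership changes hands, and
 * queue->count, initialised to 1 << (priority * 2), bounds how many
 * ownership changes may happen at the current priority level.  When
 * the current level is empty or its count is used up, the search
 * moves on to the next lower priority level, wrapping from tasks[0]
 * back around to tasks[maxpriority].
 */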
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			rpc_wake_up_task_queue_locked(queue, task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
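/*
 * Example of backing off inside a tk_action state routine (an
 * illustrative sketch only; frob_transmit() is a hypothetical state,
 * not part of this file):
 *
 *	static void frob_transmit(struct rpc_task *task)
 *	{
 *		if (task->tk_status == -EAGAIN) {
 *			task->tk_action = frob_transmit;
 *			rpc_delay(task, HZ >> 2);
 *			return;
 *		}
 *		...
 *	}
 *
 * The task sleeps on delay_queue; when the queue timer fires,
 * __rpc_atrun() runs as the wakeup callback and clears the
 * -ETIMEDOUT status, so the retried state sees tk_status == 0.
 */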
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL_GPL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return;
			if (rpc_test_and_set_running(task))
				return;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
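/*
 * Example of how a caller drives this state machine (an illustrative
 * sketch only: the frob_* names, "clnt" and "msg" are hypothetical,
 * and real callers normally go through rpc_run_task() in clnt.c
 * rather than calling rpc_new_task()/rpc_execute() directly):
 *
 *	static void frob_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		rpc_call_start(task);
 *	}
 *
 *	static const struct rpc_call_ops frob_ops = {
 *		.rpc_call_prepare	= frob_prepare,
 *		.rpc_call_done		= frob_done,
 *		.rpc_release		= frob_release,
 *	};
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &frob_ops,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_new_task(&setup);
 *
 *	if (task != NULL)
 *		rpc_execute(task);
 *
 * Each tk_action step either sets the next tk_action and returns, or
 * sleeps on a wait queue; rpc_exit_task() is the final state, after
 * which rpc_release_task() drops the scheduler's reference.
 */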
struct rpc_buffer {
	size_t	len;
	char	data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
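/*
 * Example pairing of rpc_malloc() and rpc_free() (an illustrative
 * sketch only, modelled on how a transport's buf_alloc/buf_free
 * methods use this allocator):
 *
 *	void *p = rpc_malloc(task, req->rq_callsize + req->rq_rcvsize);
 *	if (p == NULL)
 *		return NULL;
 *
 * rpc_malloc() never sleeps, so a NULL return must be handled by the
 * caller, typically by retrying later via rpc_delay().  The matching
 * release is simply rpc_free(p).  Note that the returned pointer is
 * to the data[] payload, not to the rpc_buffer header, which is why
 * rpc_free() uses container_of() to recover the header.
 */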
/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_client = task_setup_data->rpc_client;
	if (task->tk_client != NULL) {
		kref_get(&task->tk_client->cl_kref);
		if (task->tk_client->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
	}

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	if (task_setup_data->rpc_message != NULL) {
		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
		/* Bind the user cred */
		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
		if (task->tk_action == NULL)
			rpc_call_start(task);
	}

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC:       new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL)
			goto out;
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);

	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
out:
	return task;
}

static void rpc_free_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
	rpc_release_calldata(tk_ops, calldata);
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

void rpc_put_task(struct rpc_task *task)
{
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}
	if (task->tk_workqueue != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(task->tk_workqueue, &task->u.tk_work);
	} else
		rpc_free_task(task);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	if (!list_empty(&task->tk_task)) {
		struct rpc_clnt *clnt = task->tk_client;
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
	}
	BUG_ON(RPC_IS_QUEUED(task));

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC:       killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}
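/*
 * Example of holding a task reference across completion (an
 * illustrative sketch only, mirroring what rpc_run_task()-style
 * callers in clnt.c do).  rpc_new_task() returns the task with
 * tk_count == 1, and that reference is consumed by rpc_release_task()
 * when the state machine finishes, so a caller that wants to inspect
 * the task afterwards must take its own reference first:
 *
 *	atomic_inc(&task->tk_count);
 *	rpc_execute(task);
 *	status = task->tk_status;
 *	rpc_put_task(task);
 */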
/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}
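/*
 * Example of the expected call order for the pool setup above (an
 * illustrative sketch only; in the kernel proper these are invoked
 * from the sunrpc module's init and exit paths):
 *
 *	if (rpc_init_mempool() != 0)
 *		return -ENOMEM;
 *	...
 *	rpc_destroy_mempool();
 *
 * rpc_destroy_mempool() is safe to call on a partially initialised
 * state, which is why rpc_init_mempool() uses it as its error path.
 */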