// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

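/*
 * Illustrative sketch: a hypothetical transport module might pair the
 * register and unregister calls in its module init/exit hooks, roughly
 * as below. The xprt_class instance, its ident value and the setup
 * callback are all hypothetical, not taken from this file.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 99,			(hypothetical ident)
 *		.setup	= example_setup,	(hypothetical callback)
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
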
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_load_transport - load a transport implementation
 * @netid: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *netid)
{
	const struct xprt_class *t;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	xprt_class_release(t);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

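/*
 * Usage sketch (the ops table itself is hypothetical; the field names
 * come from struct rpc_xprt_ops): a stream transport that needs no
 * congestion control plugs xprt_reserve_xprt() and its counterpart
 * xprt_release_xprt() straight into its ops vector:
 *
 *	static const struct rpc_xprt_ops example_stream_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt,
 *		.release_xprt	= xprt_release_xprt,
 *		...
 *	};
 */
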
static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

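/*
 * Usage sketch (hypothetical ops table): a datagram transport that
 * wants Van Jacobson congestion control selects the _cong variants
 * instead, so waking a sender also consults the congestion window:
 *
 *	static const struct rpc_xprt_ops example_dgram_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt_cong,
 *		.release_xprt		= xprt_release_xprt_cong,
 *		.release_request	= xprt_release_rqst_cong,
 *		...
 *	};
 */
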
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

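/*
 * Usage sketch (hedged; the send path shown is hypothetical, and the
 * errno chosen is an assumption): a congestion-controlled transport
 * takes a credit before putting a request on the wire and hands it
 * back when the request completes, e.g. from its ->release_request
 * callback:
 *
 *	if (!xprt_request_get_cong(xprt, req))
 *		return -EAGAIN;		window is full, try again later
 *	... transmit req ...
 *	later, on completion:
 *	xprt_release_rqst_cong(task);
 */
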
static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

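/*
 * Worked example (illustrative numbers): with RPC_CWNDSCALE = 256 and
 * cwnd = 1024 (four requests' worth of window), a successful reply
 * grows the window by (256 * 256 + 512) / 1024 = 64 scaled units, one
 * quarter of a request, which is exactly the 1/cwnd additive increase:
 * roughly one window's worth of replies admits one extra request.  An
 * -ETIMEDOUT result instead halves cwnd to 512, and the window never
 * drops below RPC_CWNDSCALE (one request) or rises above
 * RPC_MAXCWND(xprt).
 */
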
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

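/*
 * Worked example (hypothetical timeout values): with to_initval = 10s
 * and to_retries = 3, exponential backoff gives a major timeout of
 * 10s << 3 = 80s, while linear backoff with to_increment = 10s gives
 * 10s + 10s * 3 = 40s; either result is clamped to to_maxval.
 */
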
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

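/*
 * Worked example (hypothetical timeout values): with to_exponential
 * set and to_initval = 10s, each minor timeout doubles rq_timeout
 * (10s, 20s, 40s, ...), capped at to_maxval.  Once rq_majortimeo has
 * passed, rq_timeout snaps back to to_initval, the RTT estimator is
 * reinitialized ("slow start"), and -ETIMEDOUT is returned so the
 * caller can take major-timeout action.
 */
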
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);

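/*
 * Worked example (hypothetical values): with init_to = 3s and
 * max_reconnect_timeout = 300s, repeated calls to
 * xprt_reconnect_backoff() double reestablish_timeout through 6s, 12s,
 * 24s, ... until it is clamped at 300s; xprt_reconnect_delay() then
 * turns the absolute connect deadline into the relative wait still
 * remaining.
 */
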
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

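/*
 * Usage sketch (hedged; the data-copy step is a placeholder): a
 * transport's reply handler combines the lookup and pinning helpers
 * above so that xprt->queue_lock can be dropped while the reply data
 * is copied:
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *		copied = ...;		copy reply data into req
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */
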
static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

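/*
 * Worked example (hypothetical values): if rpc_calc_rto() estimates a
 * 200ms round-trip timeout for a procedure class, a request on its
 * second attempt with one recorded timeout (rpc_ntimeo() == 1,
 * rq_retries == 1) sleeps for 200ms << 2 = 800ms, clamped to
 * to_maxval.
 */
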
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

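/*
 * Illustrative example of the queueing policy above (hypothetical
 * queue contents): given
 *
 *	xmit_queue: [A rq_cong] -> [B] -> [C]
 *
 * a new request holding a congestion credit is inserted between A and
 * B, ahead of requests without credits; a swapper task's request
 * similarly jumps ahead of unsent non-swapper requests; and a request
 * with rq_seqno == 0 whose tk_owner matches C is chained onto C's
 * rq_xmit2 list, so one owner's burst occupies a single position in
 * the queue.
 */
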
/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

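/*
 * Usage sketch (hedged; this mirrors how the RPC client state machine
 * drives this file, with error handling omitted): transmission is
 * bracketed by the prepare and end helpers above:
 *
 *	if (!xprt_prepare_transmit(task))
 *		return;			requeued, retried later
 *	xprt_transmit(task);
 *	xprt_end_transmit(task);
 */
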
/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int counter, status;

	spin_lock(&xprt->queue_lock);
	counter = 0;
	while (!list_empty(&xprt->xmit_queue)) {
		if (++counter == 20)
			break;
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

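/*
 * Usage sketch (hedged; only a minimal subset of struct xprt_create
 * fields is shown, and the struct example_xprt name plus the slot
 * counts are hypothetical): callers fill in creation arguments and let
 * the registered class's ->setup do the transport-specific work, which
 * typically sizes the slot table with xprt_alloc(); the preallocated
 * count becomes min_reqs and the ceiling bounds dynamic slot growth.
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= addr,
 *		.addrlen	= addrlen,
 *		.servername	= servername,
 *	};
 *	xprt = xprt_create_transport(&args);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *
 * and, inside a hypothetical ->setup:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  16,		preallocated slots (min_reqs)
 *			  128);		max_reqs ceiling
 *	if (!xprt)
 *		return ERR_PTR(-ENOMEM);
 */
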
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);