// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
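
/*
 * Illustrative sketch (everything named "example" here is hypothetical,
 * and XPRT_TRANSPORT_EXAMPLE stands in for a real XPRT_TRANSPORT_*
 * ident): a transport module typically registers its xprt_class from
 * module_init and unregisters it again on exit:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,
 *		.setup	= example_setup,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
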
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
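
/*
 * Design note (sketch; "example_udp_ops" is hypothetical): a transport
 * selects one of the reserve/release pairs in this file through its
 * rpc_xprt_ops. A datagram transport that wants Van Jacobson congestion
 * control wires up the *_cong variants:
 *
 *	static const struct rpc_xprt_ops example_udp_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		...
 *	};
 *
 * while a stream transport uses xprt_reserve_xprt and xprt_release_xprt
 * (xprt_release_xprt_cong is defined just below).
 */
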
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
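
/*
 * Worked example of the update above (RPC_CWNDSCALE is the fixed-point
 * scale from sunrpc/xprt.h, one request slot == RPC_CWNDSCALE): with
 * cwnd == 4 * RPC_CWNDSCALE, a successful reply adds
 * (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd, which is about
 * RPC_CWNDSCALE / 4 -- a quarter of a slot, i.e. the 1/cwnd additive
 * increase. A timeout halves cwnd instead, but never below
 * RPC_CWNDSCALE (one outstanding request).
 */
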
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_minortimeo))
		return status;
	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
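
/*
 * Worked example (hypothetical rpc_timeout values, for illustration
 * only): with to_initval = 5 * HZ, to_retries = 3 and to_exponential
 * set, xprt_calc_majortimeo() yields 5s << 3 = 40s. Minor timeouts
 * then retransmit with rq_timeout backing off 5s, 10s, 20s inside that
 * 40s major-timeout window; once the major timeout expires, the values
 * are reset and -ETIMEDOUT is returned to the caller. With
 * to_exponential clear, to_increment is added per retry instead.
 */
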
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}
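
/*
 * Usage sketch (the calling context is illustrative): a retransmit
 * path that wants the connection broken at most once per batch of
 * requests passes the cookie sampled when the request was transmitted:
 *
 *	xprt_conditional_disconnect(req->rq_xprt,
 *				    req->rq_connect_cookie);
 *
 * If the transport has since reconnected, the cookies no longer match
 * and the call is a no-op.
 */
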
static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
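
/*
 * Worked example (illustrative numbers): starting from
 * reestablish_timeout = 3 * HZ with max_reconnect_timeout = 300 * HZ,
 * calling xprt_reconnect_backoff() after each failed attempt backs the
 * timeout off to 6s, 12s, 24s, ... until the 300s cap is hit, while
 * xprt_reconnect_delay() converts the current value into the number of
 * jiffies still left to wait since the last connect attempt.
 */
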
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);
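
/*
 * Typical receive-path usage of the lookup/pin helpers (sketch; the
 * "copy reply data" step stands in for the transport-specific copy
 * into req->rq_private_buf):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *		... copy reply data ...
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 *
 * Pinning lets the handler drop queue_lock while copying data without
 * the request being released underneath it.
 */
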
/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
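
/*
 * Design note (sketch): a transport picks one of the two helpers above
 * in its rpc_xprt_ops, e.g.
 *
 *	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
 *
 * for a datagram transport whose retransmit timeout should follow the
 * per-procedure RTT estimator, while stream transports typically use
 * xprt_wait_for_reply_request_def.
 */
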
/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		trace_xprt_transmit_queued(xprt, task);

		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);
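
/*
 * Usage sketch ("struct example_xprt" and the slot-table parameters
 * are illustrative): a transport's setup routine typically embeds
 * struct rpc_xprt in its own private structure and sizes the slot
 * table here:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  slot_table_entries, max_slot_table_entries);
 *
 * This preallocates slot_table_entries request slots and lets the
 * table grow on demand up to max_slot_table_entries.
 */
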
static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
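
/*
 * Usage sketch (field values are illustrative): a caller such as the
 * RPC client fills in a struct xprt_create and looks the transport
 * class up by its registered ident:
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= addrlen,
 *		.servername	= servername,
 *	};
 *	xprt = xprt_create_transport(&xprtargs);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *
 * The matching xprt_class's ->setup() callback then allocates and
 * initializes the concrete transport.
 */
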
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);