/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that fires when the packet's timeout expires.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer is removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

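/*
 * Illustrative sketch (editorial, not part of this file): a loadable
 * transport typically registers its xprt_class from module_init() and
 * unregisters it on exit. The xprt_class fields follow the usage seen
 * in this file; the "example" identifiers and XPRT_TRANSPORT_EXAMPLE
 * are hypothetical.
 *
 *	static struct xprt_class example_transport = {
 *		.list		= LIST_HEAD_INIT(example_transport.list),
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ident		= XPRT_TRANSPORT_EXAMPLE,
 *		.setup		= xs_setup_example,
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit cleanup_example_xprt(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
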
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

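/*
 * Note on the priorities used above (an editorial summary, not new
 * behaviour): a waiter with no request slot yet sleeps at
 * RPC_PRIORITY_LOW, a slot holder that has never transmitted sleeps at
 * RPC_PRIORITY_NORMAL, and a task that has already put data on the wire
 * (rq_ntrans != 0) sleeps at RPC_PRIORITY_HIGH, so retransmissions get
 * first claim on the transport when the write lock is handed off via
 * __xprt_lock_write_next() below.
 */
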
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * has overflowed; if it has, return 0 so the caller puts the task
 * to sleep.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate of the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

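/*
 * Worked example of the additive increase above (editorial; assumes
 * RPC_CWNDSCALE is 256, i.e. one request slot in these fixed-point
 * units): with cwnd = 1024 (four slots), each reply adds
 * (256 * 256 + 512) / 1024 = 64, so it takes roughly four replies --
 * one full window -- to grow the window by one slot. A timeout halves
 * cwnd but never drops it below one slot (RPC_CWNDSCALE).
 */
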
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

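/*
 * Worked example (editorial, with hypothetical rpc_timeout values):
 * given to_initval = 2s, to_exponential = 1 and to_retries = 3,
 * xprt_reset_majortimeo() sets the major timeout 2s << 3 = 16s out,
 * while xprt_adjust_timeout() doubles rq_timeout on each minor timeout
 * (2s, 4s, 8s, capped at to_maxval). Once the major timeout passes,
 * the timeouts reset to to_initval and -ETIMEDOUT is reported to the
 * caller.
 */
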
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

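/*
 * Editorial note: xprt_lock_connect()/xprt_unlock_connect() let a
 * connection worker take over the write lock that xprt_connect()
 * acquired on behalf of @task. The opaque @cookie is only compared
 * for equality on release, so any pointer that uniquely identifies
 * the connect attempt will do; this keeps a connect handler from
 * racing with a new write-lock holder.
 */
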
bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

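/*
 * Editorial note on pinning: a reply handler that must drop the
 * transport's receive lock while it copies data into the request first
 * pins the request. The pin is a single bit, not a counter, so it does
 * not nest; it simply marks the request so that xprt_release() will
 * wait in xprt_wait_on_pinned_rqst() until the handler unpins it,
 * keeping the rpc_rqst from being released underneath the copy.
 */
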
/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt transport lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt transport lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	struct rpc_task *task = req->rq_task;

	clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate);
	if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate))
		wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
__must_hold(&req->rq_xprt->recv_lock)
{
	struct rpc_task *task = req->rq_task;

	if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
		spin_unlock(&req->rq_xprt->recv_lock);
		set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
		wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
				TASK_UNINTERRUPTIBLE);
		clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
		spin_lock(&req->rq_xprt->recv_lock);
	}
}

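/*
 * Editorial note: the rq_ntrans == 1 test below is Karn's algorithm --
 * only replies to requests that were transmitted exactly once feed the
 * RTT estimator, since a reply to a retransmitted request could belong
 * to either transmission and would skew the estimate.
 */
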
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

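/*
 * Timeout callback for a request sleeping on xprt->pending (editorial
 * summary): if no reply data has arrived, let the transport's timer op
 * adjust its retransmit estimates; if a reply raced in while the task
 * was being woken, clear the -ETIMEDOUT status so the reply is
 * processed normally.
 */
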
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	unsigned int connect_cookie;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			spin_lock(&xprt->recv_lock);
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock(&xprt->recv_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
	if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
		/*
		 * Sleep on the pending queue if we're expecting a reply.
		 * The spinlock ensures atomicity between the test of
		 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
		 */
		spin_lock(&xprt->recv_lock);
		if (!req->rq_reply_bytes_recvd) {
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			/*
			 * Send an extra queue wakeup call if the
			 * connection was dropped in case the call to
			 * rpc_sleep_on() raced.
			 */
			if (!xprt_connected(xprt))
				xprt_wake_pending_tasks(xprt, -ENOTCONN);
		}
		spin_unlock(&xprt->recv_lock);
	}
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

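/*
 * Editorial note on the dynamic slot table: xprt->num_reqs floats
 * between the transport's min_reqs (preallocated in xprt_alloc()) and
 * max_reqs bounds. xprt_dynamic_alloc_slot() drops reserve_lock around
 * kzalloc(GFP_NOFS) so slot allocation never blocks other reservers,
 * and xprt_dynamic_free_slot() frees a slot back to the allocator only
 * while the table is above its floor; otherwise the slot is recycled
 * onto xprt->free.
 */
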
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt->ops->alloc_slot(xprt, task);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

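/*
 * Editorial note: XIDs start at a random value (seeded once per
 * transport in xprt_init_xid()) and then simply increment, so a stale
 * or spoofed reply is unlikely to match a live XID; xprt_lookup_rqst()
 * relies on this when matching replies against the recv list.
 */
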
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock(&xprt->recv_lock);
	if (!list_empty(&req->rq_list)) {
		list_del_init(&req->rq_list);
		xprt_wait_on_pinned_rqst(req);
	}
	spin_unlock(&xprt->recv_lock);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->recv_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);