/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 * -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 * -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 * -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 * -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 * -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
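
/*
 * Illustrative sketch (not part of the original file): the flow described
 * above as it would look from the client side. The exact call sites live
 * in net/sunrpc/clnt.c and are paraphrased here for illustration only.
 *
 *	xprt_reserve(task);			// get a slot, or sleep on backlog
 *	... encode the RPC message into the request ...
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);		// send; sleep on xprt->pending
 *	// reply: data_ready -> xprt_lookup_rqst() -> xprt_complete_rqst()
 *	// no reply: xprt_timer() adjusts timeouts or fails with -ETIMEDOUT
 *	xprt_release(task);			// give the slot back when done
 */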

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
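
/*
 * Illustrative sketch (not part of the original file): a transport module
 * would typically register its xprt_class on load and unregister it on
 * unload. The names myproto_class, my_xprt_setup and XPRT_TRANSPORT_MYPROTO
 * are hypothetical placeholders.
 *
 *	static struct xprt_class myproto_class = {
 *		.list	= LIST_HEAD_INIT(myproto_class.list),
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_MYPROTO,	// hypothetical ident
 *		.setup	= my_xprt_setup,		// hypothetical setup
 *		.name	= "myproto",
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return xprt_register_transport(&myproto_class);
 *	}
 *
 *	static void __exit myproto_exit(void)
 *	{
 *		xprt_unregister_transport(&myproto_class);
 *	}
 */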

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}
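
/*
 * Illustrative sketch (not part of the original file): a transport selects
 * one of the two locking policies through its rpc_xprt_ops table. A
 * datagram transport would normally pair the congestion-controlled
 * variants, roughly as the UDP transport in net/sunrpc/xprtsock.c does:
 *
 *	static struct rpc_xprt_ops my_dgram_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt_cong,
 *		.release_xprt		= xprt_release_xprt_cong,
 *		.release_request	= xprt_release_rqst_cong,
 *		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
 *		.timer			= my_dgram_timer,	// hypothetical
 *		// ... remaining ops elided ...
 *	};
 */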

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
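
/*
 * Illustrative sketch (not part of the original file): each request in
 * flight holds RPC_CWNDSCALE units of "congestion". Assuming the common
 * definition RPC_CWNDSCALE = (1 << 8) = 256, a window of cwnd = 1024
 * admits four concurrent requests:
 *
 *	RPCXPRT_CONGESTED(xprt)   is   (xprt->cong >= xprt->cwnd)
 *
 *	cong =    0, cwnd = 1024  ->  request admitted, cong becomes  256
 *	cong =  768, cwnd = 1024  ->  request admitted, cong becomes 1024
 *	cong = 1024, cwnd = 1024  ->  congested; task sleeps on xprt->sending
 */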

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
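
/*
 * Worked example (illustrative, not part of the original file), assuming
 * RPC_CWNDSCALE = 256: with cwnd = 1024 (four slots) and a full window
 * outstanding, a successful reply grows the window by roughly 1/cwnd:
 *
 *	cwnd += (256 * 256 + 1024/2) / 1024;	// += 64, i.e. 1/4 slot
 *
 * whereas a timeout (-ETIMEDOUT) halves it, clamped to one slot:
 *
 *	cwnd >>= 1;				// 1024 -> 512, two slots
 *	if (cwnd < RPC_CWNDSCALE)
 *		cwnd = RPC_CWNDSCALE;
 */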

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}
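
/*
 * Worked example (illustrative, not part of the original file): given a
 * linear rpc_timeout with to_initval = 5*HZ, to_increment = 5*HZ,
 * to_retries = 2 and to_exponential = 0, xprt_reset_majortimeo() sets the
 * major timeout 5 + 5*2 = 15 seconds out, and xprt_adjust_timeout() walks
 * rq_timeout through 5s, 10s, 15s on minor timeouts before declaring a
 * major timeout and returning -ETIMEDOUT. An exponential table would
 * instead double rq_timeout on each minor timeout.
 */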

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
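
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * to retransmit records the cookie its request was sent under and passes
 * it back, so only the first retransmit in a batch actually breaks the
 * connection:
 *
 *	xprt_conditional_disconnect(xprt, req->rq_connect_cookie);
 *
 * If the transport has reconnected since (connect_cookie has changed),
 * the call is a no-op.
 */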

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
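
/*
 * Illustrative sketch (not part of the original file): a transport's
 * data_ready path matches an incoming reply to its request roughly the
 * way the socket transports in net/sunrpc/xprtsock.c do:
 *
 *	spin_lock(&xprt->transport_lock);
 *	req = xprt_lookup_rqst(xprt, xid);	// xid taken from the reply
 *	if (req != NULL) {
 *		// ... copy the reply data into req->rq_private_buf ...
 *		xprt_complete_rqst(req->rq_task, copied);
 *	}
 *	spin_unlock(&xprt->transport_lock);
 */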

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
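
/*
 * Illustrative sketch (not part of the original file): the RPC client's
 * transmit path pairs these helpers roughly the way call_transmit() in
 * net/sunrpc/clnt.c does:
 *
 *	if (!xprt_prepare_transmit(task))
 *		return;				// -EAGAIN: lock not acquired
 *	xprt_transmit(task);			// send; maybe sleep for reply
 *	xprt_end_transmit(task);		// drop the transport write lock
 */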

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
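
/*
 * Illustrative sketch (not part of the original file): the slot table is a
 * pool bounded by min_reqs and max_reqs. Growth beyond the preallocated
 * minimum uses GFP_NOWAIT because this path runs under xprt->reserve_lock
 * and must not sleep; callers that get -EAGAIN are parked on the backlog
 * queue instead. For example, with min_reqs = 2 and max_reqs = 4:
 *
 *	slot 1, slot 2		preallocated, always cached on xprt->free
 *	slot 3, slot 4		kzalloc'd on demand, kfree'd on release
 *	request 5		no slot available; task sleeps on xprt->backlog
 */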

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);
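
/*
 * Illustrative sketch (not part of the original file): a transport's setup
 * routine embeds struct rpc_xprt at the start of its private structure and
 * sizes the slot table when calling xprt_alloc(), roughly as the socket
 * transports do:
 *
 *	struct sock_xprt *new;		// private struct, rpc_xprt first
 *
 *	xprt = xprt_alloc(args->net, sizeof(*new),
 *			  xprt_tcp_slot_table_entries,		// preallocated
 *			  xprt_max_tcp_slot_table_entries);	// upper bound
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */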

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
"xprt_binding"); 1318 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 1319 rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending"); 1320 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); 1321 1322 xprt_init_xid(xprt); 1323 1324 xprt->xprt_net = get_net(net); 1325 } 1326 1327 /** 1328 * xprt_create_transport - create an RPC transport 1329 * @args: rpc transport creation arguments 1330 * 1331 */ 1332 struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 1333 { 1334 int err; 1335 struct rpc_xprt *xprt; 1336 struct xprt_class *t; 1337 1338 spin_lock(&xprt_list_lock); 1339 list_for_each_entry(t, &xprt_list, list) { 1340 if (t->ident == args->ident) { 1341 spin_unlock(&xprt_list_lock); 1342 goto found; 1343 } 1344 } 1345 spin_unlock(&xprt_list_lock); 1346 dprintk("RPC: transport (%d) not supported\n", args->ident); 1347 return ERR_PTR(-EIO); 1348 1349 found: 1350 xprt = t->setup(args); 1351 if (IS_ERR(xprt)) { 1352 dprintk("RPC: xprt_create_transport: failed, %ld\n", 1353 -PTR_ERR(xprt)); 1354 goto out; 1355 } 1356 if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) 1357 xprt->idle_timeout = 0; 1358 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 1359 if (xprt_has_timer(xprt)) 1360 setup_timer(&xprt->timer, xprt_init_autodisconnect, 1361 (unsigned long)xprt); 1362 else 1363 init_timer(&xprt->timer); 1364 1365 if (strlen(args->servername) > RPC_MAXNETNAMELEN) { 1366 xprt_destroy(xprt); 1367 return ERR_PTR(-EINVAL); 1368 } 1369 xprt->servername = kstrdup(args->servername, GFP_KERNEL); 1370 if (xprt->servername == NULL) { 1371 xprt_destroy(xprt); 1372 return ERR_PTR(-ENOMEM); 1373 } 1374 1375 err = rpc_xprt_debugfs_register(xprt); 1376 if (err) { 1377 xprt_destroy(xprt); 1378 return ERR_PTR(err); 1379 } 1380 1381 dprintk("RPC: created transport %p with %u slots\n", xprt, 1382 xprt->max_reqs); 1383 out: 1384 return xprt; 1385 } 1386 1387 /** 1388 * xprt_destroy - destroy an RPC transport, killing off all requests. 1389 * @xprt: transport to destroy 1390 * 1391 */ 1392 static void xprt_destroy(struct rpc_xprt *xprt) 1393 { 1394 dprintk("RPC: destroying transport %p\n", xprt); 1395 del_timer_sync(&xprt->timer); 1396 1397 rpc_xprt_debugfs_unregister(xprt); 1398 rpc_destroy_wait_queue(&xprt->binding); 1399 rpc_destroy_wait_queue(&xprt->pending); 1400 rpc_destroy_wait_queue(&xprt->sending); 1401 rpc_destroy_wait_queue(&xprt->backlog); 1402 cancel_work_sync(&xprt->task_cleanup); 1403 kfree(xprt->servername); 1404 /* 1405 * Tear down transport state and free the rpc_xprt 1406 */ 1407 xprt->ops->destroy(xprt); 1408 } 1409 1410 /** 1411 * xprt_put - release a reference to an RPC transport. 1412 * @xprt: pointer to the transport 1413 * 1414 */ 1415 void xprt_put(struct rpc_xprt *xprt) 1416 { 1417 if (atomic_dec_and_test(&xprt->count)) 1418 xprt_destroy(xprt); 1419 } 1420