1 /* 2 * linux/net/sunrpc/xprt.c 3 * 4 * This is a generic RPC call interface supporting congestion avoidance, 5 * and asynchronous calls. 6 * 7 * The interface works like this: 8 * 9 * - When a process places a call, it allocates a request slot if 10 * one is available. Otherwise, it sleeps on the backlog queue 11 * (xprt_reserve). 12 * - Next, the caller puts together the RPC message, stuffs it into 13 * the request struct, and calls xprt_transmit(). 14 * - xprt_transmit sends the message and installs the caller on the 15 * transport's wait list. At the same time, it installs a timer that 16 * is run after the packet's timeout has expired. 17 * - When a packet arrives, the data_ready handler walks the list of 18 * pending requests for that transport. If a matching XID is found, the 19 * caller is woken up, and the timer removed. 20 * - When no reply arrives within the timeout interval, the timer is 21 * fired by the kernel and runs xprt_timer(). It either adjusts the 22 * timeout values (minor timeout) or wakes up the caller with a status 23 * of -ETIMEDOUT. 24 * - When the caller receives a notification from RPC that a reply arrived, 25 * it should release the RPC slot, and process the reply. 26 * If the call timed out, it may choose to retry the operation by 27 * adjusting the initial timeout value, and simply calling rpc_call 28 * again. 29 * 30 * Support for async RPC is done through a set of RPC-specific scheduling 31 * primitives that `transparently' work for processes as well as async 32 * tasks that rely on callbacks. 33 * 34 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> 35 * 36 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> 37 */ 38 39 #include <linux/module.h> 40 41 #include <linux/types.h> 42 #include <linux/interrupt.h> 43 #include <linux/workqueue.h> 44 #include <linux/net.h> 45 46 #include <linux/sunrpc/clnt.h> 47 #include <linux/sunrpc/metrics.h> 48 49 /* 50 * Local variables 51 */ 52 53 #ifdef RPC_DEBUG 54 # define RPCDBG_FACILITY RPCDBG_XPRT 55 #endif 56 57 /* 58 * Local functions 59 */ 60 static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); 61 static inline void do_xprt_reserve(struct rpc_task *); 62 static void xprt_connect_status(struct rpc_task *task); 63 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 64 65 static DEFINE_SPINLOCK(xprt_list_lock); 66 static LIST_HEAD(xprt_list); 67 68 /* 69 * The transport code maintains an estimate on the maximum number of out- 70 * standing RPC requests, using a smoothed version of the congestion 71 * avoidance implemented in 44BSD. This is basically the Van Jacobson 72 * congestion algorithm: If a retransmit occurs, the congestion window is 73 * halved; otherwise, it is incremented by 1/cwnd when 74 * 75 * - a reply is received and 76 * - a full number of requests are outstanding and 77 * - the congestion window hasn't been updated recently. 78 */ 79 #define RPC_CWNDSHIFT (8U) 80 #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) 81 #define RPC_INITCWND RPC_CWNDSCALE 82 #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 83 84 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 85 86 /** 87 * xprt_register_transport - register a transport implementation 88 * @transport: transport to register 89 * 90 * If a transport implementation is loaded as a kernel module, it can 91 * call this interface to make itself known to the RPC client. 92 * 93 * Returns: 94 * 0: transport successfully registered 95 * -EEXIST: transport already registered 96 * -EINVAL: transport module being unloaded 97 */ 98 int xprt_register_transport(struct xprt_class *transport) 99 { 100 struct xprt_class *t; 101 int result; 102 103 result = -EEXIST; 104 spin_lock(&xprt_list_lock); 105 list_for_each_entry(t, &xprt_list, list) { 106 /* don't register the same transport class twice */ 107 if (t->ident == transport->ident) 108 goto out; 109 } 110 111 result = -EINVAL; 112 if (try_module_get(THIS_MODULE)) { 113 list_add_tail(&transport->list, &xprt_list); 114 printk(KERN_INFO "RPC: Registered %s transport module.\n", 115 transport->name); 116 result = 0; 117 } 118 119 out: 120 spin_unlock(&xprt_list_lock); 121 return result; 122 } 123 EXPORT_SYMBOL_GPL(xprt_register_transport); 124 125 /** 126 * xprt_unregister_transport - unregister a transport implementation 127 * @transport: transport to unregister 128 * 129 * Returns: 130 * 0: transport successfully unregistered 131 * -ENOENT: transport never registered 132 */ 133 int xprt_unregister_transport(struct xprt_class *transport) 134 { 135 struct xprt_class *t; 136 int result; 137 138 result = 0; 139 spin_lock(&xprt_list_lock); 140 list_for_each_entry(t, &xprt_list, list) { 141 if (t == transport) { 142 printk(KERN_INFO 143 "RPC: Unregistered %s transport module.\n", 144 transport->name); 145 list_del_init(&transport->list); 146 module_put(THIS_MODULE); 147 goto out; 148 } 149 } 150 result = -ENOENT; 151 152 out: 153 spin_unlock(&xprt_list_lock); 154 return result; 155 } 156 EXPORT_SYMBOL_GPL(xprt_unregister_transport); 157 158 /** 159 * xprt_reserve_xprt - serialize write access to transports 160 * @task: task that is requesting access to the transport 161 * 162 * This prevents mixing the payload of separate requests, and prevents 163 * transport connects from colliding with writes. No congestion control 164 * is provided. 165 */ 166 int xprt_reserve_xprt(struct rpc_task *task) 167 { 168 struct rpc_xprt *xprt = task->tk_xprt; 169 struct rpc_rqst *req = task->tk_rqstp; 170 171 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 172 if (task == xprt->snd_task) 173 return 1; 174 if (task == NULL) 175 return 0; 176 goto out_sleep; 177 } 178 xprt->snd_task = task; 179 if (req) { 180 req->rq_bytes_sent = 0; 181 req->rq_ntrans++; 182 } 183 return 1; 184 185 out_sleep: 186 dprintk("RPC: %5u failed to lock transport %p\n", 187 task->tk_pid, xprt); 188 task->tk_timeout = 0; 189 task->tk_status = -EAGAIN; 190 if (req && req->rq_ntrans) 191 rpc_sleep_on(&xprt->resend, task, NULL); 192 else 193 rpc_sleep_on(&xprt->sending, task, NULL); 194 return 0; 195 } 196 EXPORT_SYMBOL_GPL(xprt_reserve_xprt); 197 198 static void xprt_clear_locked(struct rpc_xprt *xprt) 199 { 200 xprt->snd_task = NULL; 201 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { 202 smp_mb__before_clear_bit(); 203 clear_bit(XPRT_LOCKED, &xprt->state); 204 smp_mb__after_clear_bit(); 205 } else 206 queue_work(rpciod_workqueue, &xprt->task_cleanup); 207 } 208 209 /* 210 * xprt_reserve_xprt_cong - serialize write access to transports 211 * @task: task that is requesting access to the transport 212 * 213 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is 214 * integrated into the decision of whether a request is allowed to be 215 * woken up and given access to the transport. 216 */ 217 int xprt_reserve_xprt_cong(struct rpc_task *task) 218 { 219 struct rpc_xprt *xprt = task->tk_xprt; 220 struct rpc_rqst *req = task->tk_rqstp; 221 222 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 223 if (task == xprt->snd_task) 224 return 1; 225 goto out_sleep; 226 } 227 if (__xprt_get_cong(xprt, task)) { 228 xprt->snd_task = task; 229 if (req) { 230 req->rq_bytes_sent = 0; 231 req->rq_ntrans++; 232 } 233 return 1; 234 } 235 xprt_clear_locked(xprt); 236 out_sleep: 237 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); 238 task->tk_timeout = 0; 239 task->tk_status = -EAGAIN; 240 if (req && req->rq_ntrans) 241 rpc_sleep_on(&xprt->resend, task, NULL); 242 else 243 rpc_sleep_on(&xprt->sending, task, NULL); 244 return 0; 245 } 246 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); 247 248 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) 249 { 250 int retval; 251 252 spin_lock_bh(&xprt->transport_lock); 253 retval = xprt->ops->reserve_xprt(task); 254 spin_unlock_bh(&xprt->transport_lock); 255 return retval; 256 } 257 258 static void __xprt_lock_write_next(struct rpc_xprt *xprt) 259 { 260 struct rpc_task *task; 261 struct rpc_rqst *req; 262 263 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 264 return; 265 266 task = rpc_wake_up_next(&xprt->resend); 267 if (!task) { 268 task = rpc_wake_up_next(&xprt->sending); 269 if (!task) 270 goto out_unlock; 271 } 272 273 req = task->tk_rqstp; 274 xprt->snd_task = task; 275 if (req) { 276 req->rq_bytes_sent = 0; 277 req->rq_ntrans++; 278 } 279 return; 280 281 out_unlock: 282 xprt_clear_locked(xprt); 283 } 284 285 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) 286 { 287 struct rpc_task *task; 288 289 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 290 return; 291 if (RPCXPRT_CONGESTED(xprt)) 292 goto out_unlock; 293 task = rpc_wake_up_next(&xprt->resend); 294 if (!task) { 295 task = rpc_wake_up_next(&xprt->sending); 296 if (!task) 297 goto out_unlock; 298 } 299 if (__xprt_get_cong(xprt, task)) { 300 struct rpc_rqst *req = task->tk_rqstp; 301 xprt->snd_task = task; 302 if (req) { 303 req->rq_bytes_sent = 0; 304 req->rq_ntrans++; 305 } 306 return; 307 } 308 out_unlock: 309 xprt_clear_locked(xprt); 310 } 311 312 /** 313 * xprt_release_xprt - allow other requests to use a transport 314 * @xprt: transport with other tasks potentially waiting 315 * @task: task that is releasing access to the transport 316 * 317 * Note that "task" can be NULL. No congestion control is provided. 318 */ 319 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 320 { 321 if (xprt->snd_task == task) { 322 xprt_clear_locked(xprt); 323 __xprt_lock_write_next(xprt); 324 } 325 } 326 EXPORT_SYMBOL_GPL(xprt_release_xprt); 327 328 /** 329 * xprt_release_xprt_cong - allow other requests to use a transport 330 * @xprt: transport with other tasks potentially waiting 331 * @task: task that is releasing access to the transport 332 * 333 * Note that "task" can be NULL. Another task is awoken to use the 334 * transport if the transport's congestion window allows it. 335 */ 336 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) 337 { 338 if (xprt->snd_task == task) { 339 xprt_clear_locked(xprt); 340 __xprt_lock_write_next_cong(xprt); 341 } 342 } 343 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong); 344 345 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) 346 { 347 spin_lock_bh(&xprt->transport_lock); 348 xprt->ops->release_xprt(xprt, task); 349 spin_unlock_bh(&xprt->transport_lock); 350 } 351 352 /* 353 * Van Jacobson congestion avoidance. Check if the congestion window 354 * overflowed. Put the task to sleep if this is the case. 355 */ 356 static int 357 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task) 358 { 359 struct rpc_rqst *req = task->tk_rqstp; 360 361 if (req->rq_cong) 362 return 1; 363 dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n", 364 task->tk_pid, xprt->cong, xprt->cwnd); 365 if (RPCXPRT_CONGESTED(xprt)) 366 return 0; 367 req->rq_cong = 1; 368 xprt->cong += RPC_CWNDSCALE; 369 return 1; 370 } 371 372 /* 373 * Adjust the congestion window, and wake up the next task 374 * that has been sleeping due to congestion 375 */ 376 static void 377 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) 378 { 379 if (!req->rq_cong) 380 return; 381 req->rq_cong = 0; 382 xprt->cong -= RPC_CWNDSCALE; 383 __xprt_lock_write_next_cong(xprt); 384 } 385 386 /** 387 * xprt_release_rqst_cong - housekeeping when request is complete 388 * @task: RPC request that recently completed 389 * 390 * Useful for transports that require congestion control. 391 */ 392 void xprt_release_rqst_cong(struct rpc_task *task) 393 { 394 __xprt_put_cong(task->tk_xprt, task->tk_rqstp); 395 } 396 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong); 397 398 /** 399 * xprt_adjust_cwnd - adjust transport congestion window 400 * @task: recently completed RPC request used to adjust window 401 * @result: result code of completed RPC request 402 * 403 * We use a time-smoothed congestion estimator to avoid heavy oscillation. 404 */ 405 void xprt_adjust_cwnd(struct rpc_task *task, int result) 406 { 407 struct rpc_rqst *req = task->tk_rqstp; 408 struct rpc_xprt *xprt = task->tk_xprt; 409 unsigned long cwnd = xprt->cwnd; 410 411 if (result >= 0 && cwnd <= xprt->cong) { 412 /* The (cwnd >> 1) term makes sure 413 * the result gets rounded properly. */ 414 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; 415 if (cwnd > RPC_MAXCWND(xprt)) 416 cwnd = RPC_MAXCWND(xprt); 417 __xprt_lock_write_next_cong(xprt); 418 } else if (result == -ETIMEDOUT) { 419 cwnd >>= 1; 420 if (cwnd < RPC_CWNDSCALE) 421 cwnd = RPC_CWNDSCALE; 422 } 423 dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", 424 xprt->cong, xprt->cwnd, cwnd); 425 xprt->cwnd = cwnd; 426 __xprt_put_cong(xprt, req); 427 } 428 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd); 429 430 /** 431 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue 432 * @xprt: transport with waiting tasks 433 * @status: result code to plant in each task before waking it 434 * 435 */ 436 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) 437 { 438 if (status < 0) 439 rpc_wake_up_status(&xprt->pending, status); 440 else 441 rpc_wake_up(&xprt->pending); 442 } 443 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks); 444 445 /** 446 * xprt_wait_for_buffer_space - wait for transport output buffer to clear 447 * @task: task to be put to sleep 448 * @action: function pointer to be executed after wait 449 */ 450 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action) 451 { 452 struct rpc_rqst *req = task->tk_rqstp; 453 struct rpc_xprt *xprt = req->rq_xprt; 454 455 task->tk_timeout = req->rq_timeout; 456 rpc_sleep_on(&xprt->pending, task, action); 457 } 458 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); 459 460 /** 461 * xprt_write_space - wake the task waiting for transport output buffer space 462 * @xprt: transport with waiting tasks 463 * 464 * Can be called in a soft IRQ context, so xprt_write_space never sleeps. 465 */ 466 void xprt_write_space(struct rpc_xprt *xprt) 467 { 468 if (unlikely(xprt->shutdown)) 469 return; 470 471 spin_lock_bh(&xprt->transport_lock); 472 if (xprt->snd_task) { 473 dprintk("RPC: write space: waking waiting task on " 474 "xprt %p\n", xprt); 475 rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task); 476 } 477 spin_unlock_bh(&xprt->transport_lock); 478 } 479 EXPORT_SYMBOL_GPL(xprt_write_space); 480 481 /** 482 * xprt_set_retrans_timeout_def - set a request's retransmit timeout 483 * @task: task whose timeout is to be set 484 * 485 * Set a request's retransmit timeout based on the transport's 486 * default timeout parameters. Used by transports that don't adjust 487 * the retransmit timeout based on round-trip time estimation. 488 */ 489 void xprt_set_retrans_timeout_def(struct rpc_task *task) 490 { 491 task->tk_timeout = task->tk_rqstp->rq_timeout; 492 } 493 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def); 494 495 /* 496 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout 497 * @task: task whose timeout is to be set 498 * 499 * Set a request's retransmit timeout using the RTT estimator. 500 */ 501 void xprt_set_retrans_timeout_rtt(struct rpc_task *task) 502 { 503 int timer = task->tk_msg.rpc_proc->p_timer; 504 struct rpc_clnt *clnt = task->tk_client; 505 struct rpc_rtt *rtt = clnt->cl_rtt; 506 struct rpc_rqst *req = task->tk_rqstp; 507 unsigned long max_timeout = clnt->cl_timeout->to_maxval; 508 509 task->tk_timeout = rpc_calc_rto(rtt, timer); 510 task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; 511 if (task->tk_timeout > max_timeout || task->tk_timeout == 0) 512 task->tk_timeout = max_timeout; 513 } 514 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt); 515 516 static void xprt_reset_majortimeo(struct rpc_rqst *req) 517 { 518 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; 519 520 req->rq_majortimeo = req->rq_timeout; 521 if (to->to_exponential) 522 req->rq_majortimeo <<= to->to_retries; 523 else 524 req->rq_majortimeo += to->to_increment * to->to_retries; 525 if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0) 526 req->rq_majortimeo = to->to_maxval; 527 req->rq_majortimeo += jiffies; 528 } 529 530 /** 531 * xprt_adjust_timeout - adjust timeout values for next retransmit 532 * @req: RPC request containing parameters to use for the adjustment 533 * 534 */ 535 int xprt_adjust_timeout(struct rpc_rqst *req) 536 { 537 struct rpc_xprt *xprt = req->rq_xprt; 538 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; 539 int status = 0; 540 541 if (time_before(jiffies, req->rq_majortimeo)) { 542 if (to->to_exponential) 543 req->rq_timeout <<= 1; 544 else 545 req->rq_timeout += to->to_increment; 546 if (to->to_maxval && req->rq_timeout >= to->to_maxval) 547 req->rq_timeout = to->to_maxval; 548 req->rq_retries++; 549 } else { 550 req->rq_timeout = to->to_initval; 551 req->rq_retries = 0; 552 xprt_reset_majortimeo(req); 553 /* Reset the RTT counters == "slow start" */ 554 spin_lock_bh(&xprt->transport_lock); 555 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); 556 spin_unlock_bh(&xprt->transport_lock); 557 status = -ETIMEDOUT; 558 } 559 560 if (req->rq_timeout == 0) { 561 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n"); 562 req->rq_timeout = 5 * HZ; 563 } 564 return status; 565 } 566 567 static void xprt_autoclose(struct work_struct *work) 568 { 569 struct rpc_xprt *xprt = 570 container_of(work, struct rpc_xprt, task_cleanup); 571 572 xprt->ops->close(xprt); 573 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 574 xprt_release_write(xprt, NULL); 575 } 576 577 /** 578 * xprt_disconnect_done - mark a transport as disconnected 579 * @xprt: transport to flag for disconnect 580 * 581 */ 582 void xprt_disconnect_done(struct rpc_xprt *xprt) 583 { 584 dprintk("RPC: disconnected transport %p\n", xprt); 585 spin_lock_bh(&xprt->transport_lock); 586 xprt_clear_connected(xprt); 587 xprt_wake_pending_tasks(xprt, -ENOTCONN); 588 spin_unlock_bh(&xprt->transport_lock); 589 } 590 EXPORT_SYMBOL_GPL(xprt_disconnect_done); 591 592 /** 593 * xprt_force_disconnect - force a transport to disconnect 594 * @xprt: transport to disconnect 595 * 596 */ 597 void xprt_force_disconnect(struct rpc_xprt *xprt) 598 { 599 /* Don't race with the test_bit() in xprt_clear_locked() */ 600 spin_lock_bh(&xprt->transport_lock); 601 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 602 /* Try to schedule an autoclose RPC call */ 603 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 604 queue_work(rpciod_workqueue, &xprt->task_cleanup); 605 xprt_wake_pending_tasks(xprt, -ENOTCONN); 606 spin_unlock_bh(&xprt->transport_lock); 607 } 608 609 /** 610 * xprt_conditional_disconnect - force a transport to disconnect 611 * @xprt: transport to disconnect 612 * @cookie: 'connection cookie' 613 * 614 * This attempts to break the connection if and only if 'cookie' matches 615 * the current transport 'connection cookie'. It ensures that we don't 616 * try to break the connection more than once when we need to retransmit 617 * a batch of RPC requests. 618 * 619 */ 620 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) 621 { 622 /* Don't race with the test_bit() in xprt_clear_locked() */ 623 spin_lock_bh(&xprt->transport_lock); 624 if (cookie != xprt->connect_cookie) 625 goto out; 626 if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt)) 627 goto out; 628 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 629 /* Try to schedule an autoclose RPC call */ 630 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 631 queue_work(rpciod_workqueue, &xprt->task_cleanup); 632 xprt_wake_pending_tasks(xprt, -ENOTCONN); 633 out: 634 spin_unlock_bh(&xprt->transport_lock); 635 } 636 637 static void 638 xprt_init_autodisconnect(unsigned long data) 639 { 640 struct rpc_xprt *xprt = (struct rpc_xprt *)data; 641 642 spin_lock(&xprt->transport_lock); 643 if (!list_empty(&xprt->recv) || xprt->shutdown) 644 goto out_abort; 645 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 646 goto out_abort; 647 spin_unlock(&xprt->transport_lock); 648 if (xprt_connecting(xprt)) 649 xprt_release_write(xprt, NULL); 650 else 651 queue_work(rpciod_workqueue, &xprt->task_cleanup); 652 return; 653 out_abort: 654 spin_unlock(&xprt->transport_lock); 655 } 656 657 /** 658 * xprt_connect - schedule a transport connect operation 659 * @task: RPC task that is requesting the connect 660 * 661 */ 662 void xprt_connect(struct rpc_task *task) 663 { 664 struct rpc_xprt *xprt = task->tk_xprt; 665 666 dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, 667 xprt, (xprt_connected(xprt) ? "is" : "is not")); 668 669 if (!xprt_bound(xprt)) { 670 task->tk_status = -EIO; 671 return; 672 } 673 if (!xprt_lock_write(xprt, task)) 674 return; 675 if (xprt_connected(xprt)) 676 xprt_release_write(xprt, task); 677 else { 678 if (task->tk_rqstp) 679 task->tk_rqstp->rq_bytes_sent = 0; 680 681 task->tk_timeout = xprt->connect_timeout; 682 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); 683 xprt->stat.connect_start = jiffies; 684 xprt->ops->connect(task); 685 } 686 return; 687 } 688 689 static void xprt_connect_status(struct rpc_task *task) 690 { 691 struct rpc_xprt *xprt = task->tk_xprt; 692 693 if (task->tk_status == 0) { 694 xprt->stat.connect_count++; 695 xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; 696 dprintk("RPC: %5u xprt_connect_status: connection established\n", 697 task->tk_pid); 698 return; 699 } 700 701 switch (task->tk_status) { 702 case -ENOTCONN: 703 dprintk("RPC: %5u xprt_connect_status: connection broken\n", 704 task->tk_pid); 705 break; 706 case -ETIMEDOUT: 707 dprintk("RPC: %5u xprt_connect_status: connect attempt timed " 708 "out\n", task->tk_pid); 709 break; 710 default: 711 dprintk("RPC: %5u xprt_connect_status: error %d connecting to " 712 "server %s\n", task->tk_pid, -task->tk_status, 713 task->tk_client->cl_server); 714 xprt_release_write(xprt, task); 715 task->tk_status = -EIO; 716 } 717 } 718 719 /** 720 * xprt_lookup_rqst - find an RPC request corresponding to an XID 721 * @xprt: transport on which the original request was transmitted 722 * @xid: RPC XID of incoming reply 723 * 724 */ 725 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) 726 { 727 struct list_head *pos; 728 729 list_for_each(pos, &xprt->recv) { 730 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); 731 if (entry->rq_xid == xid) 732 return entry; 733 } 734 735 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", 736 ntohl(xid)); 737 xprt->stat.bad_xids++; 738 return NULL; 739 } 740 EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 741 742 /** 743 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply 744 * @task: RPC request that recently completed 745 * 746 */ 747 void xprt_update_rtt(struct rpc_task *task) 748 { 749 struct rpc_rqst *req = task->tk_rqstp; 750 struct rpc_rtt *rtt = task->tk_client->cl_rtt; 751 unsigned timer = task->tk_msg.rpc_proc->p_timer; 752 753 if (timer) { 754 if (req->rq_ntrans == 1) 755 rpc_update_rtt(rtt, timer, 756 (long)jiffies - req->rq_xtime); 757 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 758 } 759 } 760 EXPORT_SYMBOL_GPL(xprt_update_rtt); 761 762 /** 763 * xprt_complete_rqst - called when reply processing is complete 764 * @task: RPC request that recently completed 765 * @copied: actual number of bytes received from the transport 766 * 767 * Caller holds transport lock. 768 */ 769 void xprt_complete_rqst(struct rpc_task *task, int copied) 770 { 771 struct rpc_rqst *req = task->tk_rqstp; 772 struct rpc_xprt *xprt = req->rq_xprt; 773 774 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", 775 task->tk_pid, ntohl(req->rq_xid), copied); 776 777 xprt->stat.recvs++; 778 task->tk_rtt = (long)jiffies - req->rq_xtime; 779 780 list_del_init(&req->rq_list); 781 req->rq_private_buf.len = copied; 782 /* Ensure all writes are done before we update req->rq_received */ 783 smp_wmb(); 784 req->rq_received = copied; 785 rpc_wake_up_queued_task(&xprt->pending, task); 786 } 787 EXPORT_SYMBOL_GPL(xprt_complete_rqst); 788 789 static void xprt_timer(struct rpc_task *task) 790 { 791 struct rpc_rqst *req = task->tk_rqstp; 792 struct rpc_xprt *xprt = req->rq_xprt; 793 794 if (task->tk_status != -ETIMEDOUT) 795 return; 796 dprintk("RPC: %5u xprt_timer\n", task->tk_pid); 797 798 spin_lock_bh(&xprt->transport_lock); 799 if (!req->rq_received) { 800 if (xprt->ops->timer) 801 xprt->ops->timer(task); 802 } else 803 task->tk_status = 0; 804 spin_unlock_bh(&xprt->transport_lock); 805 } 806 807 /** 808 * xprt_prepare_transmit - reserve the transport before sending a request 809 * @task: RPC task about to send a request 810 * 811 */ 812 int xprt_prepare_transmit(struct rpc_task *task) 813 { 814 struct rpc_rqst *req = task->tk_rqstp; 815 struct rpc_xprt *xprt = req->rq_xprt; 816 int err = 0; 817 818 dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); 819 820 spin_lock_bh(&xprt->transport_lock); 821 if (req->rq_received && !req->rq_bytes_sent) { 822 err = req->rq_received; 823 goto out_unlock; 824 } 825 if (!xprt->ops->reserve_xprt(task)) { 826 err = -EAGAIN; 827 goto out_unlock; 828 } 829 830 if (!xprt_connected(xprt)) { 831 err = -ENOTCONN; 832 goto out_unlock; 833 } 834 out_unlock: 835 spin_unlock_bh(&xprt->transport_lock); 836 return err; 837 } 838 839 void xprt_end_transmit(struct rpc_task *task) 840 { 841 xprt_release_write(task->tk_xprt, task); 842 } 843 844 /** 845 * xprt_transmit - send an RPC request on a transport 846 * @task: controlling RPC task 847 * 848 * We have to copy the iovec because sendmsg fiddles with its contents. 849 */ 850 void xprt_transmit(struct rpc_task *task) 851 { 852 struct rpc_rqst *req = task->tk_rqstp; 853 struct rpc_xprt *xprt = req->rq_xprt; 854 int status; 855 856 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 857 858 if (!req->rq_received) { 859 if (list_empty(&req->rq_list)) { 860 spin_lock_bh(&xprt->transport_lock); 861 /* Update the softirq receive buffer */ 862 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 863 sizeof(req->rq_private_buf)); 864 /* Add request to the receive list */ 865 list_add_tail(&req->rq_list, &xprt->recv); 866 spin_unlock_bh(&xprt->transport_lock); 867 xprt_reset_majortimeo(req); 868 /* Turn off autodisconnect */ 869 del_singleshot_timer_sync(&xprt->timer); 870 } 871 } else if (!req->rq_bytes_sent) 872 return; 873 874 req->rq_connect_cookie = xprt->connect_cookie; 875 req->rq_xtime = jiffies; 876 status = xprt->ops->send_request(task); 877 if (status == 0) { 878 dprintk("RPC: %5u xmit complete\n", task->tk_pid); 879 spin_lock_bh(&xprt->transport_lock); 880 881 xprt->ops->set_retrans_timeout(task); 882 883 xprt->stat.sends++; 884 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; 885 xprt->stat.bklog_u += xprt->backlog.qlen; 886 887 /* Don't race with disconnect */ 888 if (!xprt_connected(xprt)) 889 task->tk_status = -ENOTCONN; 890 else if (!req->rq_received) 891 rpc_sleep_on(&xprt->pending, task, xprt_timer); 892 spin_unlock_bh(&xprt->transport_lock); 893 return; 894 } 895 896 /* Note: at this point, task->tk_sleeping has not yet been set, 897 * hence there is no danger of the waking up task being put on 898 * schedq, and being picked up by a parallel run of rpciod(). 899 */ 900 task->tk_status = status; 901 if (status == -ECONNREFUSED) 902 rpc_sleep_on(&xprt->sending, task, NULL); 903 } 904 905 static inline void do_xprt_reserve(struct rpc_task *task) 906 { 907 struct rpc_xprt *xprt = task->tk_xprt; 908 909 task->tk_status = 0; 910 if (task->tk_rqstp) 911 return; 912 if (!list_empty(&xprt->free)) { 913 struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); 914 list_del_init(&req->rq_list); 915 task->tk_rqstp = req; 916 xprt_request_init(task, xprt); 917 return; 918 } 919 dprintk("RPC: waiting for request slot\n"); 920 task->tk_status = -EAGAIN; 921 task->tk_timeout = 0; 922 rpc_sleep_on(&xprt->backlog, task, NULL); 923 } 924 925 /** 926 * xprt_reserve - allocate an RPC request slot 927 * @task: RPC task requesting a slot allocation 928 * 929 * If no more slots are available, place the task on the transport's 930 * backlog queue. 931 */ 932 void xprt_reserve(struct rpc_task *task) 933 { 934 struct rpc_xprt *xprt = task->tk_xprt; 935 936 task->tk_status = -EIO; 937 spin_lock(&xprt->reserve_lock); 938 do_xprt_reserve(task); 939 spin_unlock(&xprt->reserve_lock); 940 } 941 942 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 943 { 944 return xprt->xid++; 945 } 946 947 static inline void xprt_init_xid(struct rpc_xprt *xprt) 948 { 949 xprt->xid = net_random(); 950 } 951 952 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) 953 { 954 struct rpc_rqst *req = task->tk_rqstp; 955 956 req->rq_timeout = task->tk_client->cl_timeout->to_initval; 957 req->rq_task = task; 958 req->rq_xprt = xprt; 959 req->rq_buffer = NULL; 960 req->rq_xid = xprt_alloc_xid(xprt); 961 req->rq_release_snd_buf = NULL; 962 xprt_reset_majortimeo(req); 963 dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, 964 req, ntohl(req->rq_xid)); 965 } 966 967 /** 968 * xprt_release - release an RPC request slot 969 * @task: task which is finished with the slot 970 * 971 */ 972 void xprt_release(struct rpc_task *task) 973 { 974 struct rpc_xprt *xprt = task->tk_xprt; 975 struct rpc_rqst *req; 976 977 if (!(req = task->tk_rqstp)) 978 return; 979 rpc_count_iostats(task); 980 spin_lock_bh(&xprt->transport_lock); 981 xprt->ops->release_xprt(xprt, task); 982 if (xprt->ops->release_request) 983 xprt->ops->release_request(task); 984 if (!list_empty(&req->rq_list)) 985 list_del(&req->rq_list); 986 xprt->last_used = jiffies; 987 if (list_empty(&xprt->recv)) 988 mod_timer(&xprt->timer, 989 xprt->last_used + xprt->idle_timeout); 990 spin_unlock_bh(&xprt->transport_lock); 991 xprt->ops->buf_free(req->rq_buffer); 992 task->tk_rqstp = NULL; 993 if (req->rq_release_snd_buf) 994 req->rq_release_snd_buf(req); 995 memset(req, 0, sizeof(*req)); /* mark unused */ 996 997 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 998 999 spin_lock(&xprt->reserve_lock); 1000 list_add(&req->rq_list, &xprt->free); 1001 rpc_wake_up_next(&xprt->backlog); 1002 spin_unlock(&xprt->reserve_lock); 1003 } 1004 1005 /** 1006 * xprt_create_transport - create an RPC transport 1007 * @args: rpc transport creation arguments 1008 * 1009 */ 1010 struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 1011 { 1012 struct rpc_xprt *xprt; 1013 struct rpc_rqst *req; 1014 struct xprt_class *t; 1015 1016 spin_lock(&xprt_list_lock); 1017 list_for_each_entry(t, &xprt_list, list) { 1018 if (t->ident == args->ident) { 1019 spin_unlock(&xprt_list_lock); 1020 goto found; 1021 } 1022 } 1023 spin_unlock(&xprt_list_lock); 1024 printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident); 1025 return ERR_PTR(-EIO); 1026 1027 found: 1028 xprt = t->setup(args); 1029 if (IS_ERR(xprt)) { 1030 dprintk("RPC: xprt_create_transport: failed, %ld\n", 1031 -PTR_ERR(xprt)); 1032 return xprt; 1033 } 1034 1035 kref_init(&xprt->kref); 1036 spin_lock_init(&xprt->transport_lock); 1037 spin_lock_init(&xprt->reserve_lock); 1038 1039 INIT_LIST_HEAD(&xprt->free); 1040 INIT_LIST_HEAD(&xprt->recv); 1041 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 1042 setup_timer(&xprt->timer, xprt_init_autodisconnect, 1043 (unsigned long)xprt); 1044 xprt->last_used = jiffies; 1045 xprt->cwnd = RPC_INITCWND; 1046 xprt->bind_index = 0; 1047 1048 rpc_init_wait_queue(&xprt->binding, "xprt_binding"); 1049 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 1050 rpc_init_wait_queue(&xprt->sending, "xprt_sending"); 1051 rpc_init_wait_queue(&xprt->resend, "xprt_resend"); 1052 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); 1053 1054 /* initialize free list */ 1055 for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) 1056 list_add(&req->rq_list, &xprt->free); 1057 1058 xprt_init_xid(xprt); 1059 1060 dprintk("RPC: created transport %p with %u slots\n", xprt, 1061 xprt->max_reqs); 1062 1063 return xprt; 1064 } 1065 1066 /** 1067 * xprt_destroy - destroy an RPC transport, killing off all requests. 1068 * @kref: kref for the transport to destroy 1069 * 1070 */ 1071 static void xprt_destroy(struct kref *kref) 1072 { 1073 struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref); 1074 1075 dprintk("RPC: destroying transport %p\n", xprt); 1076 xprt->shutdown = 1; 1077 del_timer_sync(&xprt->timer); 1078 1079 rpc_destroy_wait_queue(&xprt->binding); 1080 rpc_destroy_wait_queue(&xprt->pending); 1081 rpc_destroy_wait_queue(&xprt->sending); 1082 rpc_destroy_wait_queue(&xprt->resend); 1083 rpc_destroy_wait_queue(&xprt->backlog); 1084 /* 1085 * Tear down transport state and free the rpc_xprt 1086 */ 1087 xprt->ops->destroy(xprt); 1088 } 1089 1090 /** 1091 * xprt_put - release a reference to an RPC transport. 1092 * @xprt: pointer to the transport 1093 * 1094 */ 1095 void xprt_put(struct rpc_xprt *xprt) 1096 { 1097 kref_put(&xprt->kref, xprt_destroy); 1098 } 1099 1100 /** 1101 * xprt_get - return a reference to an RPC transport. 1102 * @xprt: pointer to the transport 1103 * 1104 */ 1105 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) 1106 { 1107 kref_get(&xprt->kref); 1108 return xprt; 1109 } 1110