// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static int	rpc_ping_noreply(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);
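/*
 * For orientation, a summary inferred from the state routines defined
 * below (not an exhaustive map): a successful call typically advances
 * through
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_refresh ->
 *	call_refreshresult -> call_allocate -> call_encode -> call_bind ->
 *	call_bind_status -> call_connect -> call_connect_status ->
 *	call_transmit -> call_transmit_status -> call_status -> call_decode
 *
 * with each routine able to divert to retry or error paths as described
 * at its definition.
 */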
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
				    struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (refcount_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
				struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}
static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}

static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_free(&rpc_clids, clnt->cl_clid);
}

static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	refcount_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
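/*
 * Illustrative sketch only (not taken from this file): a typical caller
 * of rpc_create(), defined below, fills in an rpc_create_args on the
 * stack.  Here "server_addr" and "my_program" are hypothetical names:
 *
 *	struct rpc_create_args args = {
 *		.net		= &init_net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&server_addr,
 *		.addrsize	= sizeof(server_addr),
 *		.servername	= "server.example.com",
 *		.program	= &my_program,
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *
 * RPC_CLNT_CREATE_NOPING skips the initial NULL-procedure ping.
 */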
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
	};
	char servername[48];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
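 *
 * Illustrative sketch only: a caller holding a client created with
 * AUTH_UNIX could derive a second client that shares the transport but
 * authenticates with Kerberos, e.g.
 *
 *	new = rpc_clone_client_set_auth(clnt, RPC_AUTH_GSS_KRB5);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *
 * The clone holds its own reference and is later released through
 * rpc_shutdown_client() or rpc_release_client().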
626 */ 627 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, 628 struct rpc_clnt *clnt) 629 { 630 struct rpc_xprt_switch *xps; 631 struct rpc_xprt *xprt; 632 struct rpc_clnt *new; 633 int err; 634 635 err = -ENOMEM; 636 rcu_read_lock(); 637 xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 638 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 639 rcu_read_unlock(); 640 if (xprt == NULL || xps == NULL) { 641 xprt_put(xprt); 642 xprt_switch_put(xps); 643 goto out_err; 644 } 645 args->servername = xprt->servername; 646 args->nodename = clnt->cl_nodename; 647 648 new = rpc_new_client(args, xps, xprt, clnt); 649 if (IS_ERR(new)) 650 return new; 651 652 /* Turn off autobind on clones */ 653 new->cl_autobind = 0; 654 new->cl_softrtry = clnt->cl_softrtry; 655 new->cl_softerr = clnt->cl_softerr; 656 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 657 new->cl_discrtry = clnt->cl_discrtry; 658 new->cl_chatty = clnt->cl_chatty; 659 new->cl_principal = clnt->cl_principal; 660 new->cl_max_connect = clnt->cl_max_connect; 661 return new; 662 663 out_err: 664 trace_rpc_clnt_clone_err(clnt, err); 665 return ERR_PTR(err); 666 } 667 668 /** 669 * rpc_clone_client - Clone an RPC client structure 670 * 671 * @clnt: RPC client whose parameters are copied 672 * 673 * Returns a fresh RPC client or an ERR_PTR. 674 */ 675 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) 676 { 677 struct rpc_create_args args = { 678 .program = clnt->cl_program, 679 .prognumber = clnt->cl_prog, 680 .version = clnt->cl_vers, 681 .authflavor = clnt->cl_auth->au_flavor, 682 .cred = clnt->cl_cred, 683 }; 684 return __rpc_clone_client(&args, clnt); 685 } 686 EXPORT_SYMBOL_GPL(rpc_clone_client); 687 688 /** 689 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth 690 * 691 * @clnt: RPC client whose parameters are copied 692 * @flavor: security flavor for new client 693 * 694 * Returns a fresh RPC client or an ERR_PTR. 695 */ 696 struct rpc_clnt * 697 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) 698 { 699 struct rpc_create_args args = { 700 .program = clnt->cl_program, 701 .prognumber = clnt->cl_prog, 702 .version = clnt->cl_vers, 703 .authflavor = flavor, 704 .cred = clnt->cl_cred, 705 }; 706 return __rpc_clone_client(&args, clnt); 707 } 708 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); 709 710 /** 711 * rpc_switch_client_transport: switch the RPC transport on the fly 712 * @clnt: pointer to a struct rpc_clnt 713 * @args: pointer to the new transport arguments 714 * @timeout: pointer to the new timeout parameters 715 * 716 * This function allows the caller to switch the RPC transport for the 717 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS 718 * server, for instance. It assumes that the caller has ensured that 719 * there are no active RPC tasks by using some form of locking. 720 * 721 * Returns zero if "clnt" is now using the new xprt. Otherwise a 722 * negative errno is returned, and "clnt" continues to use the old 723 * xprt. 
724 */ 725 int rpc_switch_client_transport(struct rpc_clnt *clnt, 726 struct xprt_create *args, 727 const struct rpc_timeout *timeout) 728 { 729 const struct rpc_timeout *old_timeo; 730 rpc_authflavor_t pseudoflavor; 731 struct rpc_xprt_switch *xps, *oldxps; 732 struct rpc_xprt *xprt, *old; 733 struct rpc_clnt *parent; 734 int err; 735 736 args->xprtsec = clnt->cl_xprtsec; 737 xprt = xprt_create_transport(args); 738 if (IS_ERR(xprt)) 739 return PTR_ERR(xprt); 740 741 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 742 if (xps == NULL) { 743 xprt_put(xprt); 744 return -ENOMEM; 745 } 746 747 pseudoflavor = clnt->cl_auth->au_flavor; 748 749 old_timeo = clnt->cl_timeout; 750 old = rpc_clnt_set_transport(clnt, xprt, timeout); 751 oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps); 752 753 rpc_unregister_client(clnt); 754 __rpc_clnt_remove_pipedir(clnt); 755 rpc_sysfs_client_destroy(clnt); 756 rpc_clnt_debugfs_unregister(clnt); 757 758 /* 759 * A new transport was created. "clnt" therefore 760 * becomes the root of a new cl_parent tree. clnt's 761 * children, if it has any, still point to the old xprt. 762 */ 763 parent = clnt->cl_parent; 764 clnt->cl_parent = clnt; 765 766 /* 767 * The old rpc_auth cache cannot be re-used. GSS 768 * contexts in particular are between a single 769 * client and server. 770 */ 771 err = rpc_client_register(clnt, pseudoflavor, NULL); 772 if (err) 773 goto out_revert; 774 775 synchronize_rcu(); 776 if (parent != clnt) 777 rpc_release_client(parent); 778 xprt_switch_put(oldxps); 779 xprt_put(old); 780 trace_rpc_clnt_replace_xprt(clnt); 781 return 0; 782 783 out_revert: 784 xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps); 785 rpc_clnt_set_transport(clnt, old, old_timeo); 786 clnt->cl_parent = parent; 787 rpc_client_register(clnt, pseudoflavor, NULL); 788 xprt_switch_put(xps); 789 xprt_put(xprt); 790 trace_rpc_clnt_replace_xprt_err(clnt); 791 return err; 792 } 793 EXPORT_SYMBOL_GPL(rpc_switch_client_transport); 794 795 static 796 int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, 797 void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) 798 { 799 struct rpc_xprt_switch *xps; 800 801 rcu_read_lock(); 802 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 803 rcu_read_unlock(); 804 if (xps == NULL) 805 return -EAGAIN; 806 func(xpi, xps); 807 xprt_switch_put(xps); 808 return 0; 809 } 810 811 static 812 int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) 813 { 814 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall); 815 } 816 817 static 818 int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, 819 struct rpc_xprt_iter *xpi) 820 { 821 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline); 822 } 823 824 /** 825 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports 826 * @clnt: pointer to client 827 * @fn: function to apply 828 * @data: void pointer to function data 829 * 830 * Iterates through the list of RPC transports currently attached to the 831 * client and applies the function fn(clnt, xprt, data). 832 * 833 * On error, the iteration stops, and the function returns the error value. 
834 */ 835 int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, 836 int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), 837 void *data) 838 { 839 struct rpc_xprt_iter xpi; 840 int ret; 841 842 ret = rpc_clnt_xprt_iter_init(clnt, &xpi); 843 if (ret) 844 return ret; 845 for (;;) { 846 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 847 848 if (!xprt) 849 break; 850 ret = fn(clnt, xprt, data); 851 xprt_put(xprt); 852 if (ret < 0) 853 break; 854 } 855 xprt_iter_destroy(&xpi); 856 return ret; 857 } 858 EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); 859 860 /* 861 * Kill all tasks for the given client. 862 * XXX: kill their descendants as well? 863 */ 864 void rpc_killall_tasks(struct rpc_clnt *clnt) 865 { 866 struct rpc_task *rovr; 867 868 869 if (list_empty(&clnt->cl_tasks)) 870 return; 871 872 /* 873 * Spin lock all_tasks to prevent changes... 874 */ 875 trace_rpc_clnt_killall(clnt); 876 spin_lock(&clnt->cl_lock); 877 list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) 878 rpc_signal_task(rovr); 879 spin_unlock(&clnt->cl_lock); 880 } 881 EXPORT_SYMBOL_GPL(rpc_killall_tasks); 882 883 /** 884 * rpc_cancel_tasks - try to cancel a set of RPC tasks 885 * @clnt: Pointer to RPC client 886 * @error: RPC task error value to set 887 * @fnmatch: Pointer to selector function 888 * @data: User data 889 * 890 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. 891 * The argument @error must be a negative error value. 892 */ 893 unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, 894 bool (*fnmatch)(const struct rpc_task *, 895 const void *), 896 const void *data) 897 { 898 struct rpc_task *task; 899 unsigned long count = 0; 900 901 if (list_empty(&clnt->cl_tasks)) 902 return 0; 903 /* 904 * Spin lock all_tasks to prevent changes... 905 */ 906 spin_lock(&clnt->cl_lock); 907 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 908 if (!RPC_IS_ACTIVATED(task)) 909 continue; 910 if (!fnmatch(task, data)) 911 continue; 912 rpc_task_try_cancel(task, error); 913 count++; 914 } 915 spin_unlock(&clnt->cl_lock); 916 return count; 917 } 918 EXPORT_SYMBOL_GPL(rpc_cancel_tasks); 919 920 static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt, 921 struct rpc_xprt *xprt, void *dummy) 922 { 923 if (xprt_connected(xprt)) 924 xprt_force_disconnect(xprt); 925 return 0; 926 } 927 928 void rpc_clnt_disconnect(struct rpc_clnt *clnt) 929 { 930 rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); 931 } 932 EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); 933 934 /* 935 * Properly shut down an RPC client, terminating all outstanding 936 * requests. 937 */ 938 void rpc_shutdown_client(struct rpc_clnt *clnt) 939 { 940 might_sleep(); 941 942 trace_rpc_clnt_shutdown(clnt); 943 944 while (!list_empty(&clnt->cl_tasks)) { 945 rpc_killall_tasks(clnt); 946 wait_event_timeout(destroy_wait, 947 list_empty(&clnt->cl_tasks), 1*HZ); 948 } 949 950 rpc_release_client(clnt); 951 } 952 EXPORT_SYMBOL_GPL(rpc_shutdown_client); 953 954 /* 955 * Free an RPC client 956 */ 957 static void rpc_free_client_work(struct work_struct *work) 958 { 959 struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); 960 961 trace_rpc_clnt_free(clnt); 962 963 /* These might block on processes that might allocate memory, 964 * so they cannot be called in rpciod, so they are handled separately 965 * here. 
966 */ 967 rpc_sysfs_client_destroy(clnt); 968 rpc_clnt_debugfs_unregister(clnt); 969 rpc_free_clid(clnt); 970 rpc_clnt_remove_pipedir(clnt); 971 xprt_put(rcu_dereference_raw(clnt->cl_xprt)); 972 973 kfree(clnt); 974 rpciod_down(); 975 } 976 static struct rpc_clnt * 977 rpc_free_client(struct rpc_clnt *clnt) 978 { 979 struct rpc_clnt *parent = NULL; 980 981 trace_rpc_clnt_release(clnt); 982 if (clnt->cl_parent != clnt) 983 parent = clnt->cl_parent; 984 rpc_unregister_client(clnt); 985 rpc_free_iostats(clnt->cl_metrics); 986 clnt->cl_metrics = NULL; 987 xprt_iter_destroy(&clnt->cl_xpi); 988 put_cred(clnt->cl_cred); 989 990 INIT_WORK(&clnt->cl_work, rpc_free_client_work); 991 schedule_work(&clnt->cl_work); 992 return parent; 993 } 994 995 /* 996 * Free an RPC client 997 */ 998 static struct rpc_clnt * 999 rpc_free_auth(struct rpc_clnt *clnt) 1000 { 1001 /* 1002 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to 1003 * release remaining GSS contexts. This mechanism ensures 1004 * that it can do so safely. 1005 */ 1006 if (clnt->cl_auth != NULL) { 1007 rpcauth_release(clnt->cl_auth); 1008 clnt->cl_auth = NULL; 1009 } 1010 if (refcount_dec_and_test(&clnt->cl_count)) 1011 return rpc_free_client(clnt); 1012 return NULL; 1013 } 1014 1015 /* 1016 * Release reference to the RPC client 1017 */ 1018 void 1019 rpc_release_client(struct rpc_clnt *clnt) 1020 { 1021 do { 1022 if (list_empty(&clnt->cl_tasks)) 1023 wake_up(&destroy_wait); 1024 if (refcount_dec_not_one(&clnt->cl_count)) 1025 break; 1026 clnt = rpc_free_auth(clnt); 1027 } while (clnt != NULL); 1028 } 1029 EXPORT_SYMBOL_GPL(rpc_release_client); 1030 1031 /** 1032 * rpc_bind_new_program - bind a new RPC program to an existing client 1033 * @old: old rpc_client 1034 * @program: rpc program to set 1035 * @vers: rpc program version 1036 * 1037 * Clones the rpc client and sets up a new RPC program. This is mainly 1038 * of use for enabling different RPC programs to share the same transport. 1039 * The Sun NFSv2/v3 ACL protocol can do this. 
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}

static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}

void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt) {
		task->tk_xprt = NULL;
		if (task->tk_client)
			rpc_task_release_xprt(task->tk_client, xprt);
		else
			xprt_put(xprt);
	}
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);
	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);
	}
}

static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}

static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}

static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt) {
		if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
		      (task->tk_flags & RPC_TASK_MOVEABLE)))
			return;
		xprt_release(task);
		xprt_put(task->tk_xprt);
	}
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}

static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	refcount_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);
}

static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		task->tk_msg.rpc_cred = msg->rpc_cred;
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			get_cred(task->tk_msg.rpc_cred);
	}
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		return task;

	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
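/*
 * Illustrative sketch only, with hypothetical names: an asynchronous
 * caller provides its own rpc_call_ops and collects the result in the
 * rpc_call_done callback, which runs from rpciod and must not sleep:
 *
 *	static void my_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct my_request *req = calldata;
 *
 *		req->result = task->tk_status;
 *	}
 *
 *	static const struct rpc_call_ops my_call_ops = {
 *		.rpc_call_done	= my_call_done,
 *	};
 *
 *	err = rpc_call_async(clnt, &msg, 0, &my_call_ops, req);
 */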
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		return task;
	}

	xprt_init_bc_request(req, task);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
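 *
 * Illustrative sketch of correct use:
 *
 *	rcu_read_lock();
 *	pr_info("server at %s\n",
 *		rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();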
1393 */ 1394 const char *rpc_peeraddr2str(struct rpc_clnt *clnt, 1395 enum rpc_display_format_t format) 1396 { 1397 struct rpc_xprt *xprt; 1398 1399 xprt = rcu_dereference(clnt->cl_xprt); 1400 1401 if (xprt->address_strings[format] != NULL) 1402 return xprt->address_strings[format]; 1403 else 1404 return "unprintable"; 1405 } 1406 EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 1407 1408 static const struct sockaddr_in rpc_inaddr_loopback = { 1409 .sin_family = AF_INET, 1410 .sin_addr.s_addr = htonl(INADDR_ANY), 1411 }; 1412 1413 static const struct sockaddr_in6 rpc_in6addr_loopback = { 1414 .sin6_family = AF_INET6, 1415 .sin6_addr = IN6ADDR_ANY_INIT, 1416 }; 1417 1418 /* 1419 * Try a getsockname() on a connected datagram socket. Using a 1420 * connected datagram socket prevents leaving a socket in TIME_WAIT. 1421 * This conserves the ephemeral port number space. 1422 * 1423 * Returns zero and fills in "buf" if successful; otherwise, a 1424 * negative errno is returned. 1425 */ 1426 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, 1427 struct sockaddr *buf) 1428 { 1429 struct socket *sock; 1430 int err; 1431 1432 err = __sock_create(net, sap->sa_family, 1433 SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1434 if (err < 0) { 1435 dprintk("RPC: can't create UDP socket (%d)\n", err); 1436 goto out; 1437 } 1438 1439 switch (sap->sa_family) { 1440 case AF_INET: 1441 err = kernel_bind(sock, 1442 (struct sockaddr *)&rpc_inaddr_loopback, 1443 sizeof(rpc_inaddr_loopback)); 1444 break; 1445 case AF_INET6: 1446 err = kernel_bind(sock, 1447 (struct sockaddr *)&rpc_in6addr_loopback, 1448 sizeof(rpc_in6addr_loopback)); 1449 break; 1450 default: 1451 err = -EAFNOSUPPORT; 1452 goto out_release; 1453 } 1454 if (err < 0) { 1455 dprintk("RPC: can't bind UDP socket (%d)\n", err); 1456 goto out_release; 1457 } 1458 1459 err = kernel_connect(sock, sap, salen, 0); 1460 if (err < 0) { 1461 dprintk("RPC: can't connect UDP socket (%d)\n", err); 1462 goto out_release; 1463 } 1464 1465 err = kernel_getsockname(sock, buf); 1466 if (err < 0) { 1467 dprintk("RPC: getsockname failed (%d)\n", err); 1468 goto out_release; 1469 } 1470 1471 err = 0; 1472 if (buf->sa_family == AF_INET6) { 1473 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; 1474 sin6->sin6_scope_id = 0; 1475 } 1476 dprintk("RPC: %s succeeded\n", __func__); 1477 1478 out_release: 1479 sock_release(sock); 1480 out: 1481 return err; 1482 } 1483 1484 /* 1485 * Scraping a connected socket failed, so we don't have a useable 1486 * local address. Fallback: generate an address that will prevent 1487 * the server from calling us back. 1488 * 1489 * Returns zero and fills in "buf" if successful; otherwise, a 1490 * negative errno is returned. 
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
		       sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
		       sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC: %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC: %s: succeeded\n", __func__);
	return 0;
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
1595 */ 1596 size_t rpc_max_payload(struct rpc_clnt *clnt) 1597 { 1598 size_t ret; 1599 1600 rcu_read_lock(); 1601 ret = rcu_dereference(clnt->cl_xprt)->max_payload; 1602 rcu_read_unlock(); 1603 return ret; 1604 } 1605 EXPORT_SYMBOL_GPL(rpc_max_payload); 1606 1607 /** 1608 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes 1609 * @clnt: RPC client to query 1610 */ 1611 size_t rpc_max_bc_payload(struct rpc_clnt *clnt) 1612 { 1613 struct rpc_xprt *xprt; 1614 size_t ret; 1615 1616 rcu_read_lock(); 1617 xprt = rcu_dereference(clnt->cl_xprt); 1618 ret = xprt->ops->bc_maxpayload(xprt); 1619 rcu_read_unlock(); 1620 return ret; 1621 } 1622 EXPORT_SYMBOL_GPL(rpc_max_bc_payload); 1623 1624 unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt) 1625 { 1626 struct rpc_xprt *xprt; 1627 unsigned int ret; 1628 1629 rcu_read_lock(); 1630 xprt = rcu_dereference(clnt->cl_xprt); 1631 ret = xprt->ops->bc_num_slots(xprt); 1632 rcu_read_unlock(); 1633 return ret; 1634 } 1635 EXPORT_SYMBOL_GPL(rpc_num_bc_slots); 1636 1637 /** 1638 * rpc_force_rebind - force transport to check that remote port is unchanged 1639 * @clnt: client to rebind 1640 * 1641 */ 1642 void rpc_force_rebind(struct rpc_clnt *clnt) 1643 { 1644 if (clnt->cl_autobind) { 1645 rcu_read_lock(); 1646 xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); 1647 rcu_read_unlock(); 1648 } 1649 } 1650 EXPORT_SYMBOL_GPL(rpc_force_rebind); 1651 1652 static int 1653 __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *)) 1654 { 1655 task->tk_status = 0; 1656 task->tk_rpc_status = 0; 1657 task->tk_action = action; 1658 return 1; 1659 } 1660 1661 /* 1662 * Restart an (async) RPC call. Usually called from within the 1663 * exit handler. 1664 */ 1665 int 1666 rpc_restart_call(struct rpc_task *task) 1667 { 1668 return __rpc_restart_call(task, call_start); 1669 } 1670 EXPORT_SYMBOL_GPL(rpc_restart_call); 1671 1672 /* 1673 * Restart an (async) RPC call from the call_prepare state. 1674 * Usually called from within the exit handler. 1675 */ 1676 int 1677 rpc_restart_call_prepare(struct rpc_task *task) 1678 { 1679 if (task->tk_ops->rpc_call_prepare != NULL) 1680 return __rpc_restart_call(task, rpc_prepare_task); 1681 return rpc_restart_call(task); 1682 } 1683 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); 1684 1685 const char 1686 *rpc_proc_name(const struct rpc_task *task) 1687 { 1688 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1689 1690 if (proc) { 1691 if (proc->p_name) 1692 return proc->p_name; 1693 else 1694 return "NULL"; 1695 } else 1696 return "no proc"; 1697 } 1698 1699 static void 1700 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) 1701 { 1702 trace_rpc_call_rpcerror(task, tk_status, rpc_status); 1703 rpc_task_set_rpc_status(task, rpc_status); 1704 rpc_exit(task, tk_status); 1705 } 1706 1707 static void 1708 rpc_call_rpcerror(struct rpc_task *task, int status) 1709 { 1710 __rpc_call_rpcerror(task, status, status); 1711 } 1712 1713 /* 1714 * 0. Initial state 1715 * 1716 * Other FSM states can be visited zero or more times, but 1717 * this state is visited exactly once for each RPC. 
1718 */ 1719 static void 1720 call_start(struct rpc_task *task) 1721 { 1722 struct rpc_clnt *clnt = task->tk_client; 1723 int idx = task->tk_msg.rpc_proc->p_statidx; 1724 1725 trace_rpc_request(task); 1726 1727 if (task->tk_client->cl_shutdown) { 1728 rpc_call_rpcerror(task, -EIO); 1729 return; 1730 } 1731 1732 /* Increment call count (version might not be valid for ping) */ 1733 if (clnt->cl_program->version[clnt->cl_vers]) 1734 clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; 1735 clnt->cl_stats->rpccnt++; 1736 task->tk_action = call_reserve; 1737 rpc_task_set_transport(task, clnt); 1738 } 1739 1740 /* 1741 * 1. Reserve an RPC call slot 1742 */ 1743 static void 1744 call_reserve(struct rpc_task *task) 1745 { 1746 task->tk_status = 0; 1747 task->tk_action = call_reserveresult; 1748 xprt_reserve(task); 1749 } 1750 1751 static void call_retry_reserve(struct rpc_task *task); 1752 1753 /* 1754 * 1b. Grok the result of xprt_reserve() 1755 */ 1756 static void 1757 call_reserveresult(struct rpc_task *task) 1758 { 1759 int status = task->tk_status; 1760 1761 /* 1762 * After a call to xprt_reserve(), we must have either 1763 * a request slot or else an error status. 1764 */ 1765 task->tk_status = 0; 1766 if (status >= 0) { 1767 if (task->tk_rqstp) { 1768 task->tk_action = call_refresh; 1769 return; 1770 } 1771 1772 rpc_call_rpcerror(task, -EIO); 1773 return; 1774 } 1775 1776 switch (status) { 1777 case -ENOMEM: 1778 rpc_delay(task, HZ >> 2); 1779 fallthrough; 1780 case -EAGAIN: /* woken up; retry */ 1781 task->tk_action = call_retry_reserve; 1782 return; 1783 default: 1784 rpc_call_rpcerror(task, status); 1785 } 1786 } 1787 1788 /* 1789 * 1c. Retry reserving an RPC call slot 1790 */ 1791 static void 1792 call_retry_reserve(struct rpc_task *task) 1793 { 1794 task->tk_status = 0; 1795 task->tk_action = call_reserveresult; 1796 xprt_retry_reserve(task); 1797 } 1798 1799 /* 1800 * 2. Bind and/or refresh the credentials 1801 */ 1802 static void 1803 call_refresh(struct rpc_task *task) 1804 { 1805 task->tk_action = call_refreshresult; 1806 task->tk_status = 0; 1807 task->tk_client->cl_stats->rpcauthrefresh++; 1808 rpcauth_refreshcred(task); 1809 } 1810 1811 /* 1812 * 2a. Process the results of a credential refresh 1813 */ 1814 static void 1815 call_refreshresult(struct rpc_task *task) 1816 { 1817 int status = task->tk_status; 1818 1819 task->tk_status = 0; 1820 task->tk_action = call_refresh; 1821 switch (status) { 1822 case 0: 1823 if (rpcauth_uptodatecred(task)) { 1824 task->tk_action = call_allocate; 1825 return; 1826 } 1827 /* Use rate-limiting and a max number of retries if refresh 1828 * had status 0 but failed to update the cred. 1829 */ 1830 fallthrough; 1831 case -ETIMEDOUT: 1832 rpc_delay(task, 3*HZ); 1833 fallthrough; 1834 case -EAGAIN: 1835 status = -EACCES; 1836 fallthrough; 1837 case -EKEYEXPIRED: 1838 if (!task->tk_cred_retry) 1839 break; 1840 task->tk_cred_retry--; 1841 trace_rpc_retry_refresh_status(task); 1842 return; 1843 case -ENOMEM: 1844 rpc_delay(task, HZ >> 4); 1845 return; 1846 } 1847 trace_rpc_refresh_status(task); 1848 rpc_call_rpcerror(task, status); 1849 } 1850 1851 /* 1852 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. 1853 * (Note: buffer memory is freed in xprt_release). 
1854 */ 1855 static void 1856 call_allocate(struct rpc_task *task) 1857 { 1858 const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; 1859 struct rpc_rqst *req = task->tk_rqstp; 1860 struct rpc_xprt *xprt = req->rq_xprt; 1861 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1862 int status; 1863 1864 task->tk_status = 0; 1865 task->tk_action = call_encode; 1866 1867 if (req->rq_buffer) 1868 return; 1869 1870 if (proc->p_proc != 0) { 1871 BUG_ON(proc->p_arglen == 0); 1872 if (proc->p_decode != NULL) 1873 BUG_ON(proc->p_replen == 0); 1874 } 1875 1876 /* 1877 * Calculate the size (in quads) of the RPC call 1878 * and reply headers, and convert both values 1879 * to byte sizes. 1880 */ 1881 req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) + 1882 proc->p_arglen; 1883 req->rq_callsize <<= 2; 1884 /* 1885 * Note: the reply buffer must at minimum allocate enough space 1886 * for the 'struct accepted_reply' from RFC5531. 1887 */ 1888 req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \ 1889 max_t(size_t, proc->p_replen, 2); 1890 req->rq_rcvsize <<= 2; 1891 1892 status = xprt->ops->buf_alloc(task); 1893 trace_rpc_buf_alloc(task, status); 1894 if (status == 0) 1895 return; 1896 if (status != -ENOMEM) { 1897 rpc_call_rpcerror(task, status); 1898 return; 1899 } 1900 1901 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { 1902 task->tk_action = call_allocate; 1903 rpc_delay(task, HZ>>4); 1904 return; 1905 } 1906 1907 rpc_call_rpcerror(task, -ERESTARTSYS); 1908 } 1909 1910 static int 1911 rpc_task_need_encode(struct rpc_task *task) 1912 { 1913 return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 && 1914 (!(task->tk_flags & RPC_TASK_SENT) || 1915 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) || 1916 xprt_request_need_retransmit(task)); 1917 } 1918 1919 static void 1920 rpc_xdr_encode(struct rpc_task *task) 1921 { 1922 struct rpc_rqst *req = task->tk_rqstp; 1923 struct xdr_stream xdr; 1924 1925 xdr_buf_init(&req->rq_snd_buf, 1926 req->rq_buffer, 1927 req->rq_callsize); 1928 xdr_buf_init(&req->rq_rcv_buf, 1929 req->rq_rbuffer, 1930 req->rq_rcvsize); 1931 1932 req->rq_reply_bytes_recvd = 0; 1933 req->rq_snd_buf.head[0].iov_len = 0; 1934 xdr_init_encode(&xdr, &req->rq_snd_buf, 1935 req->rq_snd_buf.head[0].iov_base, req); 1936 if (rpc_encode_header(task, &xdr)) 1937 return; 1938 1939 task->tk_status = rpcauth_wrap_req(task, &xdr); 1940 } 1941 1942 /* 1943 * 3. Encode arguments of an RPC call 1944 */ 1945 static void 1946 call_encode(struct rpc_task *task) 1947 { 1948 if (!rpc_task_need_encode(task)) 1949 goto out; 1950 1951 /* Dequeue task from the receive queue while we're encoding */ 1952 xprt_request_dequeue_xprt(task); 1953 /* Encode here so that rpcsec_gss can use correct sequence number. */ 1954 rpc_xdr_encode(task); 1955 /* Add task to reply queue before transmission to avoid races */ 1956 if (task->tk_status == 0 && rpc_reply_expected(task)) 1957 task->tk_status = xprt_request_enqueue_receive(task); 1958 /* Did the encode result in an error condition? */ 1959 if (task->tk_status != 0) { 1960 /* Was the error nonfatal? 
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
		return;
	}

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
		return;
	}
	if (!xprt_prepare_transmit(task))
		return;
	xprt_connect(task);
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
		goto out_next;
	}
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	task->tk_status = 0;
	switch (status) {
	case -ECONNREFUSED:
		/* A positive refusal suggests a rebind is needed. */
		if (RPC_IS_SOFTCONN(task))
			break;
		if (clnt->cl_autobind) {
			rpc_force_rebind(clnt);
			goto out_retry;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EPROTO:
		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
					    task->tk_rqstp->rq_connect_cookie);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EAGAIN:
	case -ETIMEDOUT:
		if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
		    (task->tk_flags & RPC_TASK_MOVEABLE) &&
		    test_bit(XPRT_REMOVE, &xprt->state)) {
			struct rpc_xprt *saved = task->tk_xprt;
			struct rpc_xprt_switch *xps;

			rcu_read_lock();
			xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
			rcu_read_unlock();
			if (xps->xps_nxprts > 1) {
				long value;

				xprt_release(task);
				value = atomic_long_dec_return(&xprt->queuelen);
				if (value == 0)
					rpc_xprt_switch_remove_xprt(xps, saved,
								    true);
				xprt_put(saved);
				task->tk_xprt = NULL;
				task->tk_action = call_start;
			}
			xprt_switch_put(xps);
			if (!task->tk_xprt)
				return;
		}
		goto out_retry;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto out_retry;
	}
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_transmit;
	return;
out_retry:
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
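/*
 * Illustrative sketch only: RPC_TASK_SOFTCONN makes the connection
 * errors handled above final instead of retried, and RPC_TASK_NOCONNECT
 * fails a task outright when no connection exists.  The message and
 * callback ops below are hypothetical:
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &example_ops,
 *		.flags		= RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *
 *	if (!IS_ERR(task))
 *		rpc_put_task(task);
 */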
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
		return;
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
			return;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
		return;
	}

	switch (task->tk_status) {
	default:
		break;
	case -EBADMSG:
		task->tk_status = 0;
		task->tk_action = call_encode;
		break;
	/*
	 * Special cases: if we've been waiting on the
	 * socket's write_space() callback, or if the
	 * socket just returned a connection error,
	 * then hold onto the transport lock.
	 */
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_action = call_transmit;
		task->tk_status = 0;
		break;
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
			return;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		task->tk_action = call_bind;
		task->tk_status = 0;
		break;
	}
	rpc_check_timeout(task);
}
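/*
 * Illustrative sketch only: a synchronous caller drives steps 4-7
 * above through rpc_call_sync(), which runs the state machine and
 * waits for completion.  The procedure, argument and result names
 * are hypothetical:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procedures[EXAMPLEPROC_READ],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */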
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}

/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
			return;
		task->tk_status = 0;
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	switch (task->tk_status) {
	case 0:
		/* Success */
	case -ENETDOWN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		break;
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply error: %d\n",
			task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply error: %d\n",
			task->tk_status);
		break;
	}
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
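/*
 * Note: the backchannel path above is not entered through call_start;
 * rpc_run_bc_task() points tk_action at call_bc_encode directly, so a
 * backchannel reply skips the bind and connect steps of the
 * forechannel state machine.
 */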
2445 */ 2446 rpc_delay(task, 3*HZ); 2447 fallthrough; 2448 case -ETIMEDOUT: 2449 break; 2450 case -ECONNREFUSED: 2451 case -ECONNRESET: 2452 case -ECONNABORTED: 2453 case -ENOTCONN: 2454 rpc_force_rebind(clnt); 2455 break; 2456 case -EADDRINUSE: 2457 rpc_delay(task, 3*HZ); 2458 fallthrough; 2459 case -EPIPE: 2460 case -EAGAIN: 2461 break; 2462 case -ENFILE: 2463 case -ENOBUFS: 2464 case -ENOMEM: 2465 rpc_delay(task, HZ>>2); 2466 break; 2467 case -EIO: 2468 /* shutdown or soft timeout */ 2469 goto out_exit; 2470 default: 2471 if (clnt->cl_chatty) 2472 printk("%s: RPC call returned error %d\n", 2473 clnt->cl_program->name, -status); 2474 goto out_exit; 2475 } 2476 task->tk_action = call_encode; 2477 if (status != -ECONNRESET && status != -ECONNABORTED) 2478 rpc_check_timeout(task); 2479 return; 2480 out_exit: 2481 rpc_call_rpcerror(task, status); 2482 } 2483 2484 static bool 2485 rpc_check_connected(const struct rpc_rqst *req) 2486 { 2487 /* No allocated request or transport? return true */ 2488 if (!req || !req->rq_xprt) 2489 return true; 2490 return xprt_connected(req->rq_xprt); 2491 } 2492 2493 static void 2494 rpc_check_timeout(struct rpc_task *task) 2495 { 2496 struct rpc_clnt *clnt = task->tk_client; 2497 2498 if (RPC_SIGNALLED(task)) 2499 return; 2500 2501 if (xprt_adjust_timeout(task->tk_rqstp) == 0) 2502 return; 2503 2504 trace_rpc_timeout_status(task); 2505 task->tk_timeouts++; 2506 2507 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { 2508 rpc_call_rpcerror(task, -ETIMEDOUT); 2509 return; 2510 } 2511 2512 if (RPC_IS_SOFT(task)) { 2513 /* 2514 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has 2515 * been sent, it should time out only if the transport 2516 * connection gets terminally broken. 2517 */ 2518 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && 2519 rpc_check_connected(task->tk_rqstp)) 2520 return; 2521 2522 if (clnt->cl_chatty) { 2523 pr_notice_ratelimited( 2524 "%s: server %s not responding, timed out\n", 2525 clnt->cl_program->name, 2526 task->tk_xprt->servername); 2527 } 2528 if (task->tk_flags & RPC_TASK_TIMEOUT) 2529 rpc_call_rpcerror(task, -ETIMEDOUT); 2530 else 2531 __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); 2532 return; 2533 } 2534 2535 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 2536 task->tk_flags |= RPC_CALL_MAJORSEEN; 2537 if (clnt->cl_chatty) { 2538 pr_notice_ratelimited( 2539 "%s: server %s not responding, still trying\n", 2540 clnt->cl_program->name, 2541 task->tk_xprt->servername); 2542 } 2543 } 2544 rpc_force_rebind(clnt); 2545 /* 2546 * Did our request time out due to an RPCSEC_GSS out-of-sequence 2547 * event? RFC2203 requires the server to drop all such requests. 2548 */ 2549 rpcauth_invalcred(task); 2550 } 2551 2552 /* 2553 * 7. Decode the RPC reply 2554 */ 2555 static void 2556 call_decode(struct rpc_task *task) 2557 { 2558 struct rpc_clnt *clnt = task->tk_client; 2559 struct rpc_rqst *req = task->tk_rqstp; 2560 struct xdr_stream xdr; 2561 int err; 2562 2563 if (!task->tk_msg.rpc_proc->p_decode) { 2564 task->tk_action = rpc_exit_task; 2565 return; 2566 } 2567 2568 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 2569 if (clnt->cl_chatty) { 2570 pr_notice_ratelimited("%s: server %s OK\n", 2571 clnt->cl_program->name, 2572 task->tk_xprt->servername); 2573 } 2574 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 2575 } 2576 2577 /* 2578 * Did we ever call xprt_complete_rqst()? If not, we should assume 2579 * the message is incomplete. 
2580 */ 2581 err = -EAGAIN; 2582 if (!req->rq_reply_bytes_recvd) 2583 goto out; 2584 2585 /* Ensure that we see all writes made by xprt_complete_rqst() 2586 * before it changed req->rq_reply_bytes_recvd. 2587 */ 2588 smp_rmb(); 2589 2590 req->rq_rcv_buf.len = req->rq_private_buf.len; 2591 trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); 2592 2593 /* Check that the softirq receive buffer is valid */ 2594 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 2595 sizeof(req->rq_rcv_buf)) != 0); 2596 2597 xdr_init_decode(&xdr, &req->rq_rcv_buf, 2598 req->rq_rcv_buf.head[0].iov_base, req); 2599 err = rpc_decode_header(task, &xdr); 2600 out: 2601 switch (err) { 2602 case 0: 2603 task->tk_action = rpc_exit_task; 2604 task->tk_status = rpcauth_unwrap_resp(task, &xdr); 2605 return; 2606 case -EAGAIN: 2607 task->tk_status = 0; 2608 if (task->tk_client->cl_discrtry) 2609 xprt_conditional_disconnect(req->rq_xprt, 2610 req->rq_connect_cookie); 2611 task->tk_action = call_encode; 2612 rpc_check_timeout(task); 2613 break; 2614 case -EKEYREJECTED: 2615 task->tk_action = call_reserve; 2616 rpc_check_timeout(task); 2617 rpcauth_invalcred(task); 2618 /* Ensure we obtain a new XID if we retry! */ 2619 xprt_release(task); 2620 } 2621 } 2622 2623 static int 2624 rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) 2625 { 2626 struct rpc_clnt *clnt = task->tk_client; 2627 struct rpc_rqst *req = task->tk_rqstp; 2628 __be32 *p; 2629 int error; 2630 2631 error = -EMSGSIZE; 2632 p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); 2633 if (!p) 2634 goto out_fail; 2635 *p++ = req->rq_xid; 2636 *p++ = rpc_call; 2637 *p++ = cpu_to_be32(RPC_VERSION); 2638 *p++ = cpu_to_be32(clnt->cl_prog); 2639 *p++ = cpu_to_be32(clnt->cl_vers); 2640 *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); 2641 2642 error = rpcauth_marshcred(task, xdr); 2643 if (error < 0) 2644 goto out_fail; 2645 return 0; 2646 out_fail: 2647 trace_rpc_bad_callhdr(task); 2648 rpc_call_rpcerror(task, error); 2649 return error; 2650 } 2651 2652 static noinline int 2653 rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) 2654 { 2655 struct rpc_clnt *clnt = task->tk_client; 2656 int error; 2657 __be32 *p; 2658 2659 /* RFC-1014 says that the representation of XDR data must be a 2660 * multiple of four bytes 2661 * - if it isn't pointer subtraction in the NFS client may give 2662 * undefined results 2663 */ 2664 if (task->tk_rqstp->rq_rcv_buf.len & 3) 2665 goto out_unparsable; 2666 2667 p = xdr_inline_decode(xdr, 3 * sizeof(*p)); 2668 if (!p) 2669 goto out_unparsable; 2670 p++; /* skip XID */ 2671 if (*p++ != rpc_reply) 2672 goto out_unparsable; 2673 if (*p++ != rpc_msg_accepted) 2674 goto out_msg_denied; 2675 2676 error = rpcauth_checkverf(task, xdr); 2677 if (error) 2678 goto out_verifier; 2679 2680 p = xdr_inline_decode(xdr, sizeof(*p)); 2681 if (!p) 2682 goto out_unparsable; 2683 switch (*p) { 2684 case rpc_success: 2685 return 0; 2686 case rpc_prog_unavail: 2687 trace_rpc__prog_unavail(task); 2688 error = -EPFNOSUPPORT; 2689 goto out_err; 2690 case rpc_prog_mismatch: 2691 trace_rpc__prog_mismatch(task); 2692 error = -EPROTONOSUPPORT; 2693 goto out_err; 2694 case rpc_proc_unavail: 2695 trace_rpc__proc_unavail(task); 2696 error = -EOPNOTSUPP; 2697 goto out_err; 2698 case rpc_garbage_args: 2699 case rpc_system_err: 2700 trace_rpc__garbage_args(task); 2701 error = -EIO; 2702 break; 2703 default: 2704 goto out_unparsable; 2705 } 2706 2707 out_garbage: 2708 clnt->cl_stats->rpcgarbage++; 2709 if (task->tk_garb_retry) { 2710 
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	int error;
	__be32 *p;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't, pointer subtraction in the NFS client may give
	 *   undefined results
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (!p)
		goto out_unparsable;
	p++;	/* skip XID */
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);
	if (error)
		goto out_verifier;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_success:
		return 0;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
		goto out_err;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
		goto out_err;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		error = -EIO;
		break;
	default:
		goto out_unparsable;
	}

out_garbage:
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
		return -EAGAIN;
	}
out_err:
	rpc_call_rpcerror(task, error);
	return error;

out_unparsable:
	trace_rpc__unparsable(task);
	error = -EIO;
	goto out_garbage;

out_verifier:
	trace_rpc_bad_verifier(task);
	goto out_err;

out_msg_denied:
	error = -EACCES;
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_auth_error:
		break;
	case rpc_mismatch:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	default:
		goto out_unparsable;
	}

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
			break;
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
		return -EAGAIN;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		break;
	default:
		goto out_unparsable;
	}
	goto out_err;
}
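/*
 * For reference, the reply header parsed above follows RFC 5531:
 *
 *	xid, msg_type (REPLY = 1), reply_stat (accepted or denied),
 *
 * then, for an accepted reply, the verifier followed by accept_stat;
 * a denied reply instead carries the rejection details handled at
 * out_msg_denied.
 */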
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		const void *obj)
{
}

static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		void *obj)
{
	return 0;
}

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static const struct rpc_procinfo rpcproc_null_noreply = {
	.p_encode = rpcproc_encode_null,
};

static void
rpc_null_call_prepare(struct rpc_task *task, void *data)
{
	task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
	rpc_call_start(task);
}

static const struct rpc_call_ops rpc_null_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_default_callback,
};

static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = ops ?: &rpc_null_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			 RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);

static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_task *task;
	int status;

	if (clnt->cl_auth->au_ops->ping)
		return clnt->cl_auth->au_ops->ping(clnt);

	task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}

static int rpc_ping_noreply(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null_noreply,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_null_ops,
		.flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
	};
	struct rpc_task *task;
	int status;

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
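/*
 * Note: rpc_create() normally issues one of the NULL pings above while
 * a client is being created; creators that cannot tolerate the extra
 * round trip pass RPC_CLNT_CREATE_NOPING.  Illustrative sketch only:
 *
 *	args.flags |= RPC_CLNT_CREATE_NOPING;
 *	clnt = rpc_create(&args);
 */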
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};

/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @dummy: unused
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *dummy)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;

	if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
		rcu_read_lock();
		pr_warn("SUNRPC: reached max allowed number (%d) of transports; not adding transport to server: %s\n",
			clnt->cl_max_connect,
			rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
		rcu_read_unlock();
		return -EINVAL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
				    &rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);

	data->xps->xps_nunique_destaddr_xprts++;
	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);

static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt,
				    struct rpc_add_xprt_test *data)
{
	struct rpc_task *task;
	int status;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);

	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		return status;

	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
	data->add_xprt_test(clnt, xprt, data->data);

	return 0;
}

/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) the caller of the test function must drop its references to the
 *	rpc_xprt_switch and the rpc_xprt.
 *   2) the test function must call rpc_xprt_switch_add_xprt, usually in
 *	the rpc_call_done routine.
 *
 * Upon success (return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch.
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps:  the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *	  and test function call data
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
	if (status < 0)
		goto out_err;

	status = 1;
out_err:
	if (status < 0)
		pr_info("RPC: %s failed: %d, addr %s not added\n",
			__func__, status,
			xprt->address_strings[RPC_DISPLAY_ADDR]);
	xprt_put(xprt);
	xprt_switch_put(xps);
	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
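/*
 * Illustrative sketch only (hypothetical callback): the
 * struct rpc_add_xprt_test passed through @data bundles the test
 * function that rpc_clnt_add_xprt_helper() invokes above:
 *
 *	static void example_add_xprt_test(struct rpc_clnt *clnt,
 *					  struct rpc_xprt *xprt,
 *					  void *calldata)
 *	{
 *		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
 *	}
 *
 *	struct rpc_add_xprt_test test = {
 *		.add_xprt_test	= example_add_xprt_test,
 *		.data		= NULL,
 *	};
 */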
/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in @xprtargs and
 * adds it to @clnt.
 * If a @setup callback is provided, it is called to test connectivity
 * and/or to set up the connection before the new transport is added.
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0, ident;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	ident = xprt->xprt_class->ident;
	rcu_read_unlock();

	if (!xprtargs->ident)
		xprtargs->ident = ident;
	xprtargs->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
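/*
 * Illustrative sketch only: trunking discovery in the NFS style adds a
 * candidate connection by pairing rpc_clnt_add_xprt() with one of the
 * setup callbacks above; the transport parameters are hypothetical:
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= &init_net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *	};
 *	int err = rpc_clnt_add_xprt(clnt, &xprtargs,
 *				    rpc_clnt_test_and_add_xprt, NULL);
 */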
static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt,
				  struct rpc_xprt *xprt,
				  struct rpc_add_xprt_test *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *main_xprt;
	int status = 0;

	xprt_get(xprt);

	rcu_read_lock();
	main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
				   (struct sockaddr *)&main_xprt->addr);
	rcu_read_unlock();
	xprt_put(main_xprt);
	if (status || !test_bit(XPRT_OFFLINE, &xprt->state))
		goto out;

	status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
out:
	xprt_put(xprt);
	xprt_switch_put(xps);
	return status;
}

/* rpc_clnt_probe_trunked_xprts -- probe offlined transports for session trunking
 * @clnt: rpc_clnt structure
 * @data: struct rpc_add_xprt_test holding the test function and its data
 *
 * For each offlined transport found in the rpc_clnt structure call
 * the function rpc_xprt_probe_trunked() which will determine if this
 * transport still belongs to the trunking group.
 */
void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt,
				  struct rpc_add_xprt_test *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi);
	if (ret)
		return;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = rpc_xprt_probe_trunked(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
		xprt_iter_rewind(&xpi);
	}
	xprt_iter_destroy(&xpi);
}
EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts);

static int rpc_xprt_offline(struct rpc_clnt *clnt,
			    struct rpc_xprt *xprt,
			    void *data)
{
	struct rpc_xprt *main_xprt;
	struct rpc_xprt_switch *xps;
	int err = 0;

	xprt_get(xprt);

	rcu_read_lock();
	main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
				(struct sockaddr *)&main_xprt->addr);
	rcu_read_unlock();
	xprt_put(main_xprt);
	if (err)
		goto out;

	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
		err = -EINTR;
		goto out;
	}
	xprt_set_offline_locked(xprt, xps);

	xprt_release_write(xprt, NULL);
out:
	xprt_put(xprt);
	xprt_switch_put(xps);
	return err;
}

/* rpc_clnt_manage_trunked_xprts -- offline trunked transports
 * @clnt: rpc_clnt structure
 *
 * For each active transport found in the rpc_clnt structure call
 * the function rpc_xprt_offline() which will identify trunked transports
 * and will mark them offline.
 */
void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts);

struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};

static int
rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *data)
{
	struct connect_timeout_data *timeo = data;

	if (xprt->ops->set_connect_timeout)
		xprt->ops->set_connect_timeout(xprt,
				timeo->connect_timeout,
				timeo->reconnect_timeout);
	return 0;
}

void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
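/*
 * Illustrative sketch only: a caller that wants faster connect
 * failover across all of a client's transports might use
 * (hypothetical values):
 *
 *	rpc_set_connect_timeout(clnt, 5 * HZ, 30 * HZ);
 */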
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);

void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	rcu_read_unlock();
	xprt_set_online_locked(xprt, xps);
}

void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	if (rpc_clnt_xprt_switch_has_addr(clnt,
		(const struct sockaddr *)&xprt->addr)) {
		return rpc_clnt_xprt_set_online(clnt, xprt);
	}
	rcu_read_lock();
	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
				 xprt);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);

void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	rpc_xprt_switch_remove_xprt(xps, xprt, 0);
	xps->xps_nunique_destaddr_xprts--;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt);

bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
				   const struct sockaddr *sap)
{
	struct rpc_xprt_switch *xps;
	bool ret;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	ret = rpc_xprt_switch_has_addr(xps, sap);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- -timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}

void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
#endif

#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}

int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	while (clnt != clnt->cl_parent)
		clnt = clnt->cl_parent;
	if (atomic_inc_return(&clnt->cl_swapper) == 1)
		return rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_activate_callback, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);

static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}

void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
	while (clnt != clnt->cl_parent)
		clnt = clnt->cl_parent;
	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
		rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_deactivate_callback, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
#endif /* CONFIG_SUNRPC_SWAP */