/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may be have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>


#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/* Shorthand for tracing a task's progress through the call FSM below. */
#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__FUNCTION__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);	/* protects all_clients */

/* Woken whenever a client's task list drains; see rpc_shutdown_client(). */
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


/*
 * Forward declarations of the per-state handlers of the RPC call
 * finite state machine. Each handler sets tk_action to the next state.
 */
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static __be32 *	call_header(struct rpc_task *task);
static __be32 *	call_verify(struct rpc_task *task);

static int	rpc_ping(struct rpc_clnt *clnt, int flags);

/* Add a client to the global all_clients list under rpc_client_lock. */
static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}

/* Remove a client from the global all_clients list under rpc_client_lock. */
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}

/*
 * Create this client's directory in rpc_pipefs under @dir_name, naming
 * it "clnt%x" from a monotonically increasing id. Retries with the next
 * id on -EEXIST. On success holds a reference to the rpc_pipefs mount in
 * cl_vfsmnt; on failure (or when the program has no pipe dir) cl_vfsmnt
 * and cl_dentry are left as ERR_PTR values.
 */
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;	/* program has no pipefs directory */

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
		/* -EEXIST: id collided with an existing entry; try the next one */
	}
}

/*
 * Allocate and initialize an rpc_clnt for @program/@vers over @xprt.
 * On success the client owns the caller's reference to @xprt; on any
 * failure after the xprt check, that reference is dropped via xprt_put()
 * (out_err path), so the caller must not put it again.
 */
static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
{
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, servname, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = clnt;	/* not a clone: parent is self */

	/*
	 * Short server names live in the inline buffer; longer ones get a
	 * separate allocation. If that allocation fails, fall back to a
	 * truncated copy in the inline buffer rather than failing outright.
	 */
	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != 0)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Unbound transport: look up the remote port via rpcbind as needed */
	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	kref_init(&clnt->cl_kref);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename (truncated to UNX_MAXNODENAME) */
	clnt->cl_nodelen = strlen(utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
	rpc_register_client(clnt);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct rpc_xprtsock_create xprtargs = {
		.proto = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.timeout = args->timeout
	};
	char servername[20];

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 * (servername is on the stack, but rpc_new_client copies it.)
	 */
	if (args->servername == NULL) {
		struct sockaddr_in *addr =
			(struct sockaddr_in *) &args->address;
		snprintf(servername, sizeof(servername), NIPQUAD_FMT,
			NIPQUAD(addr->sin_addr.s_addr));
		args->servername = servername;
	}

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			args->program->name, args->servername, xprt);

	clnt = rpc_new_client(xprt, args->servername, args->program,
				args->version, args->authflavor);
	if (IS_ERR(clnt))
		return clnt;

	/* NULL-procedure ping unless the caller opted out */
	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_INTR)
		clnt->cl_intr = 1;
	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	/* Start from a byte copy of the parent, then fix up per-clone state */
	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	/* The clone shares the parent's auth (copied by kmemdup); pin it */
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	/* Pin the shared transport and the parent for the clone's lifetime */
	xprt_get(clnt->cl_xprt);
	kref_get(&clnt->cl_kref);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err);
	return ERR_PTR(err);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	/*
	 * Keep killing tasks until the list drains; rpc_release_client()
	 * wakes destroy_wait as the list empties, and the 1s timeout
	 * guards against a missed wakeup.
	 */
	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}

/*
 * Free an RPC client (final kref release). Tears down the pipefs dir,
 * drops the parent reference for clones, and releases transport,
 * iostats, and the client structure itself.
 */
static void
rpc_free_client(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		/* Clones share cl_server with the parent; don't free it here */
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}

/*
 * Release the client's auth handle first, then free the client.
 * Called as the kref release function for cl_kref.
 */
static void
rpc_free_auth(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	if (clnt->cl_auth == NULL) {
		rpc_free_client(kref);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	kref_init(kref);	/* temporarily revive the ref for the auth teardown */
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	kref_put(kref, rpc_free_client);
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	/* Let rpc_shutdown_client() know the task list has drained */
	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	kref_put(&clnt->cl_kref, rpc_free_auth);
}

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * Common worker for rpc_call_sync/rpc_call_async/rpc_run_task: create a
 * task, optionally set up the message (@msg may be NULL), and execute it
 * with signals masked. Returns the task with an extra reference held
 * (caller must rpc_put_task), or an ERR_PTR.
 */
static
struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
		struct rpc_message *msg,
		int flags,
		const struct rpc_call_ops *ops,
		void *data)
{
	struct rpc_task *task, *ret;
	sigset_t oldset;

	task = rpc_new_task(clnt, flags, ops, data);
	if (task == NULL) {
		rpc_release_calldata(ops, data);
		return ERR_PTR(-ENOMEM);
	}

	/* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
	rpc_task_sigmask(task, &oldset);
	if (msg != NULL) {
		rpc_call_setup(task, msg, 0);
		if (task->tk_status != 0) {
			ret = ERR_PTR(task->tk_status);
			rpc_put_task(task);
			goto out;
		}
	}
	atomic_inc(&task->tk_count);	/* caller's reference */
	rpc_execute(task);
	ret = task;
out:
	rpc_restore_sigmask(&oldset);
	return ret;
}

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	int status;

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;

	task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
					const struct rpc_call_ops *tk_ops,
					void *data)
{
	/* msg == NULL: caller sets up the message itself */
	return rpc_do_run_task(clnt, NULL, flags, tk_ops, data);
}
EXPORT_SYMBOL(rpc_run_task);

/*
 * Attach an rpc_message to a task and bind a credential; on any setup
 * failure tk_status is already set and the task exits immediately.
 */
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	bytes = sizeof(xprt->addr);
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &clnt->cl_xprt->addr, bytes);
	/*
	 * NOTE(review): the return value is the stored address length, even
	 * if the copy above was truncated to bufsize — callers should pass
	 * a buffer of at least sizeof(xprt->addr); confirm with callers.
	 */
	return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 */
char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

/* Pass send/receive buffer size hints down to the transport, if supported. */
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	/* Only meaningful for autobinding clients; a no-op otherwise */
	if (clnt->cl_autobind)
		xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			task->tk_msg.rpc_proc->p_proc,
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	/* Refresh stale credentials before taking a slot */
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	/* au_cslack: credential slack to reserve in the header estimate */
	unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	if (req->rq_buffer)
		return;		/* already allocated (retransmit path) */

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	/* Out of memory: back off and retry from the reserve state */
	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/* A request needs (re)encoding iff nothing has been placed in the send buffer */
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

/* Force call_encode to run again on the next transmit attempt */
static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}

/* Reset an xdr_buf to describe a single contiguous region [start, start+len) */
static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = len;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	encode;
	__be32		*p;

	dprint_status(task);

	/* Split rq_buffer: first rq_callsize bytes for send, rest for receive */
	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;		/* header-only procedure (e.g. NULL ping) */

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		dprintk("RPC: %5u remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -EAGAIN:
		task->tk_action = call_bind;
		if (!RPC_IS_SOFT(task))
			return;
		/* if soft mounted, test if we've timed out */
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	}
	rpc_exit(task, -EIO);
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			return;
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (task->tk_msg.rpc_proc->p_decode != NULL)
		return;
	/* No reply expected: we're done once the request is on the wire */
	task->tk_action = rpc_exit_task;
	rpc_wake_up_task(task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;
	/*
	 * Special case: if we've been waiting on the socket's write_space()
	 * callback, then don't call xprt_end_transmit().
	 */
	if (task->tk_status == -EAGAIN)
		return;
	xprt_end_transmit(task);
	rpc_task_force_reencode(task);
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	/* A reply arrived while the request was fully sent: use its length */
	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_disconnect(task->tk_xprt);
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.
 *	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	/* Minor timeout: just bump the per-request timeout and retransmit */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	/* Soft tasks give up on a major timeout instead of retrying forever */
	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	/* Log the "not responding" warning only once per major-timeout episode */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7.
 *	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* The server answered after a major timeout: note the recovery */
	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * tk_status holds the reply length here; anything shorter than 12
	 * bytes cannot even contain the fixed part of an RPC reply header
	 * (xid + msg type + reply status), so treat it as a lost reply.
	 */
	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_received.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;		/* call_verify already terminated the task */
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	/* Discard the reply and retransmit with the same XID */
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
	if (task->tk_client->cl_discrtry)
		xprt_disconnect(task->tk_xprt);
}

/*
 * 8.
Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	/* Release the request slot: the call retried with a refreshed
	 * credential must go out under a new XID. */
	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	/* Refresh succeeded and the credential is up to date: continue
	 * via call_reserve (set above). */
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	/* Definitive denial: terminate the task. */
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	/* Anything else is treated as transient: refresh again, after a
	 * short delay unless the failure was itself a timeout. */
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 *
 * Marshals the fixed RPC call header plus the credential into the
 * first send iovec and returns a pointer just past it, where the
 * procedure arguments will be encoded.
 */
static __be32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	/* Leave room for any transport-level record marker. */
	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	/* Record how much of the first iovec the header consumed. */
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

/*
 * Reply header verification
 *
 * Returns a pointer just past the verified header on success.  On
 * failure it returns an ERR_PTR: -EAGAIN when tk_action has been set
 * up for a retry, otherwise the task has already been terminated via
 * rpc_exit().
 */
static __be32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;	/* reply length in 4-byte words */
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	/* Need at least XID, direction and accept/reject status. */
	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
				task->tk_pid, __FUNCTION__, n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		/* Rejected reply: the next word gives the reject status. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;	/* detailed auth failure decoded below */
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
					"mismatch!\n",
					task->tk_pid, __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
					"unknown error: %x\n",
					task->tk_pid, __FUNCTION__, n);
			goto out_eio;
		}
		/* AUTH_ERROR: the following word says why. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/* Stale credential: invalidate it and retry via
			 * call_refresh, at most tk_cred_retry times. */
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __FUNCTION__);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __FUNCTION__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __FUNCTION__, n);
			error = -EIO;
		}
		/* Retries exhausted (break above) or unrecoverable
		 * rejection: fail the call with `error'. */
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __FUNCTION__, n);
		goto out_err;
	}
	/* Accepted reply: let the auth flavour validate the verifier. */
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __FUNCTION__);
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	/* Finally decode the accept status. */
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %p unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __FUNCTION__,
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __FUNCTION__, n);
		/* Also retry */
	}

out_garbage:
	/* Garbled message (either direction): retransmit via call_bind
	 * up to tk_garb_retry times, then fall through to -EIO.  Note
	 * that the out_retry label is deliberately *inside* the
	 * conditional so the auth retry paths above share the
	 * ERR_PTR(-EAGAIN) return without the retry bookkeeping. */
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __FUNCTION__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__FUNCTION__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__FUNCTION__);
	goto out_garbage;
}

/* NULL procedure encoder: procedure 0 carries no arguments. */
static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

/* NULL procedure decoder: procedure 0 carries no results. */
static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

/* Descriptor for procedure 0 (NULL), used by rpc_ping/rpc_call_null. */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

/*
 * Issue a synchronous NULL call with an AUTH_NULL credential to check
 * that the server is reachable and answering RPCs.  Returns the
 * rpc_call_sync() status.
 */
static int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
/*
 * Start a NULL call on @clnt using the caller-supplied credential
 * @cred, running it through rpc_do_run_task() with the default task
 * ops.  NOTE(review): the return/ownership contract follows
 * rpc_do_run_task(), whose definition is not visible here -- confirm
 * there whether the caller must release the returned task.
 */
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL);
}
EXPORT_SYMBOL(rpc_call_null);

#ifdef RPC_DEBUG
/*
 * Debugging aid: print one line per live RPC task.  Walks the global
 * client list under rpc_client_lock and each client's task list under
 * that client's cl_lock (nested inside rpc_client_lock).
 */
void rpc_show_tasks(void)
{
	struct rpc_clnt *clnt;
	struct rpc_task *t;

	spin_lock(&rpc_client_lock);
	if (list_empty(&all_clients))
		goto out;
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- ---ops--\n");
	list_for_each_entry(clnt, &all_clients, cl_clients) {
		/* Skip idle clients without taking their cl_lock. */
		if (list_empty(&clnt->cl_tasks))
			continue;
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(t, &clnt->cl_tasks, tk_task) {
			const char *rpc_waitq = "none";

			/* Name of the wait queue the task is sleeping on. */
			if (RPC_IS_QUEUED(t))
				rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

			printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
				t->tk_pid,
				(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
				t->tk_flags, t->tk_status,
				t->tk_client,
				(t->tk_client ? t->tk_client->cl_prog : 0),
				t->tk_rqstp, t->tk_timeout,
				rpc_waitq,
				t->tk_action, t->tk_ops);
		}
		spin_unlock(&clnt->cl_lock);
	}
out:
	spin_unlock(&rpc_client_lock);
}
#endif