1 /* 2 * linux/net/sunrpc/clnt.c 3 * 4 * This file contains the high-level RPC interface. 5 * It is modeled as a finite state machine to support both synchronous 6 * and asynchronous requests. 7 * 8 * - RPC header generation and argument serialization. 9 * - Credential refresh. 10 * - TCP connect handling. 11 * - Retry of operation when it is suspected the operation failed because 12 * of uid squashing on the server, or when the credentials were stale 13 * and need to be refreshed, or when a packet was damaged in transit. 14 * This may be have to be moved to the VFS layer. 15 * 16 * NB: BSD uses a more intelligent approach to guessing when a request 17 * or reply has been lost by keeping the RTO estimate for each procedure. 18 * We currently make do with a constant timeout value. 19 * 20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 22 */ 23 24 #include <asm/system.h> 25 26 #include <linux/module.h> 27 #include <linux/types.h> 28 #include <linux/mm.h> 29 #include <linux/slab.h> 30 #include <linux/utsname.h> 31 32 #include <linux/sunrpc/clnt.h> 33 #include <linux/workqueue.h> 34 #include <linux/sunrpc/rpc_pipe_fs.h> 35 36 #include <linux/nfs.h> 37 38 39 #define RPC_SLACK_SPACE (1024) /* total overkill */ 40 41 #ifdef RPC_DEBUG 42 # define RPCDBG_FACILITY RPCDBG_CALL 43 #endif 44 45 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 46 47 48 static void call_start(struct rpc_task *task); 49 static void call_reserve(struct rpc_task *task); 50 static void call_reserveresult(struct rpc_task *task); 51 static void call_allocate(struct rpc_task *task); 52 static void call_encode(struct rpc_task *task); 53 static void call_decode(struct rpc_task *task); 54 static void call_bind(struct rpc_task *task); 55 static void call_bind_status(struct rpc_task *task); 56 static void call_transmit(struct rpc_task *task); 57 static void call_status(struct rpc_task *task); 58 static void call_transmit_status(struct rpc_task *task); 59 static void call_refresh(struct rpc_task *task); 60 static void call_refreshresult(struct rpc_task *task); 61 static void call_timeout(struct rpc_task *task); 62 static void call_connect(struct rpc_task *task); 63 static void call_connect_status(struct rpc_task *task); 64 static u32 * call_header(struct rpc_task *task); 65 static u32 * call_verify(struct rpc_task *task); 66 67 68 static int 69 rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 70 { 71 static uint32_t clntid; 72 int error; 73 74 if (dir_name == NULL) 75 return 0; 76 for (;;) { 77 snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), 78 "%s/clnt%x", dir_name, 79 (unsigned int)clntid++); 80 clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0'; 81 clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt); 82 if (!IS_ERR(clnt->cl_dentry)) 83 return 0; 84 error = PTR_ERR(clnt->cl_dentry); 85 if (error != -EEXIST) { 86 printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", 87 clnt->cl_pathname, error); 88 return error; 89 } 90 } 91 } 92 93 /* 94 * Create an RPC client 95 * FIXME: This should also take a flags argument (as in task->tk_flags). 96 * It's called (among others) from pmap_create_client, which may in 97 * turn be called by an async task. In this case, rpciod should not be 98 * made to sleep too long. 99 */ 100 struct rpc_clnt * 101 rpc_new_client(struct rpc_xprt *xprt, char *servname, 102 struct rpc_program *program, u32 vers, 103 rpc_authflavor_t flavor) 104 { 105 struct rpc_version *version; 106 struct rpc_clnt *clnt = NULL; 107 struct rpc_auth *auth; 108 int err; 109 int len; 110 111 dprintk("RPC: creating %s client for %s (xprt %p)\n", 112 program->name, servname, xprt); 113 114 err = -EINVAL; 115 if (!xprt) 116 goto out_err; 117 if (vers >= program->nrvers || !(version = program->version[vers])) 118 goto out_err; 119 120 err = -ENOMEM; 121 clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL); 122 if (!clnt) 123 goto out_err; 124 memset(clnt, 0, sizeof(*clnt)); 125 atomic_set(&clnt->cl_users, 0); 126 atomic_set(&clnt->cl_count, 1); 127 clnt->cl_parent = clnt; 128 129 clnt->cl_server = clnt->cl_inline_name; 130 len = strlen(servname) + 1; 131 if (len > sizeof(clnt->cl_inline_name)) { 132 char *buf = kmalloc(len, GFP_KERNEL); 133 if (buf != 0) 134 clnt->cl_server = buf; 135 else 136 len = sizeof(clnt->cl_inline_name); 137 } 138 strlcpy(clnt->cl_server, servname, len); 139 140 clnt->cl_xprt = xprt; 141 clnt->cl_procinfo = version->procs; 142 clnt->cl_maxproc = version->nrprocs; 143 clnt->cl_protname = program->name; 144 clnt->cl_pmap = &clnt->cl_pmap_default; 145 clnt->cl_port = xprt->addr.sin_port; 146 clnt->cl_prog = program->number; 147 clnt->cl_vers = version->number; 148 clnt->cl_prot = xprt->prot; 149 clnt->cl_stats = program->stats; 150 rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait"); 151 152 if (!clnt->cl_port) 153 clnt->cl_autobind = 1; 154 155 clnt->cl_rtt = &clnt->cl_rtt_default; 156 rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); 157 158 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 159 if (err < 0) 160 goto out_no_path; 161 162 auth = rpcauth_create(flavor, clnt); 163 if (IS_ERR(auth)) { 164 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", 165 flavor); 166 err = PTR_ERR(auth); 167 goto out_no_auth; 168 } 169 170 /* save the nodename */ 171 clnt->cl_nodelen = strlen(system_utsname.nodename); 172 if (clnt->cl_nodelen > UNX_MAXNODENAME) 173 clnt->cl_nodelen = UNX_MAXNODENAME; 174 memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen); 175 return clnt; 176 177 out_no_auth: 178 rpc_rmdir(clnt->cl_pathname); 179 out_no_path: 180 if (clnt->cl_server != clnt->cl_inline_name) 181 kfree(clnt->cl_server); 182 kfree(clnt); 183 out_err: 184 xprt_destroy(xprt); 185 return ERR_PTR(err); 186 } 187 188 /** 189 * Create an RPC client 190 * @xprt - pointer to xprt struct 191 * @servname - name of server 192 * @info - rpc_program 193 * @version - rpc_program version 194 * @authflavor - rpc_auth flavour to use 195 * 196 * Creates an RPC client structure, then pings the server in order to 197 * determine if it is up, and if it supports this program and version. 198 * 199 * This function should never be called by asynchronous tasks such as 200 * the portmapper. 201 */ 202 struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, 203 struct rpc_program *info, u32 version, rpc_authflavor_t authflavor) 204 { 205 struct rpc_clnt *clnt; 206 int err; 207 208 clnt = rpc_new_client(xprt, servname, info, version, authflavor); 209 if (IS_ERR(clnt)) 210 return clnt; 211 err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 212 if (err == 0) 213 return clnt; 214 rpc_shutdown_client(clnt); 215 return ERR_PTR(err); 216 } 217 218 /* 219 * This function clones the RPC client structure. It allows us to share the 220 * same transport while varying parameters such as the authentication 221 * flavour. 222 */ 223 struct rpc_clnt * 224 rpc_clone_client(struct rpc_clnt *clnt) 225 { 226 struct rpc_clnt *new; 227 228 new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL); 229 if (!new) 230 goto out_no_clnt; 231 memcpy(new, clnt, sizeof(*new)); 232 atomic_set(&new->cl_count, 1); 233 atomic_set(&new->cl_users, 0); 234 new->cl_parent = clnt; 235 atomic_inc(&clnt->cl_count); 236 /* Duplicate portmapper */ 237 rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); 238 /* Turn off autobind on clones */ 239 new->cl_autobind = 0; 240 new->cl_oneshot = 0; 241 new->cl_dead = 0; 242 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 243 if (new->cl_auth) 244 atomic_inc(&new->cl_auth->au_count); 245 new->cl_pmap = &new->cl_pmap_default; 246 rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); 247 return new; 248 out_no_clnt: 249 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); 250 return ERR_PTR(-ENOMEM); 251 } 252 253 /* 254 * Properly shut down an RPC client, terminating all outstanding 255 * requests. Note that we must be certain that cl_oneshot and 256 * cl_dead are cleared, or else the client would be destroyed 257 * when the last task releases it. 258 */ 259 int 260 rpc_shutdown_client(struct rpc_clnt *clnt) 261 { 262 dprintk("RPC: shutting down %s client for %s, tasks=%d\n", 263 clnt->cl_protname, clnt->cl_server, 264 atomic_read(&clnt->cl_users)); 265 266 while (atomic_read(&clnt->cl_users) > 0) { 267 /* Don't let rpc_release_client destroy us */ 268 clnt->cl_oneshot = 0; 269 clnt->cl_dead = 0; 270 rpc_killall_tasks(clnt); 271 wait_event_timeout(destroy_wait, 272 atomic_read(&clnt->cl_users) > 0, 1*HZ); 273 } 274 275 if (atomic_read(&clnt->cl_users) < 0) { 276 printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", 277 clnt, atomic_read(&clnt->cl_users)); 278 #ifdef RPC_DEBUG 279 rpc_show_tasks(); 280 #endif 281 BUG(); 282 } 283 284 return rpc_destroy_client(clnt); 285 } 286 287 /* 288 * Delete an RPC client 289 */ 290 int 291 rpc_destroy_client(struct rpc_clnt *clnt) 292 { 293 if (!atomic_dec_and_test(&clnt->cl_count)) 294 return 1; 295 BUG_ON(atomic_read(&clnt->cl_users) != 0); 296 297 dprintk("RPC: destroying %s client for %s\n", 298 clnt->cl_protname, clnt->cl_server); 299 if (clnt->cl_auth) { 300 rpcauth_destroy(clnt->cl_auth); 301 clnt->cl_auth = NULL; 302 } 303 if (clnt->cl_parent != clnt) { 304 rpc_destroy_client(clnt->cl_parent); 305 goto out_free; 306 } 307 if (clnt->cl_pathname[0]) 308 rpc_rmdir(clnt->cl_pathname); 309 if (clnt->cl_xprt) { 310 xprt_destroy(clnt->cl_xprt); 311 clnt->cl_xprt = NULL; 312 } 313 if (clnt->cl_server != clnt->cl_inline_name) 314 kfree(clnt->cl_server); 315 out_free: 316 kfree(clnt); 317 return 0; 318 } 319 320 /* 321 * Release an RPC client 322 */ 323 void 324 rpc_release_client(struct rpc_clnt *clnt) 325 { 326 dprintk("RPC: rpc_release_client(%p, %d)\n", 327 clnt, atomic_read(&clnt->cl_users)); 328 329 if (!atomic_dec_and_test(&clnt->cl_users)) 330 return; 331 wake_up(&destroy_wait); 332 if (clnt->cl_oneshot || clnt->cl_dead) 333 rpc_destroy_client(clnt); 334 } 335 336 /** 337 * rpc_bind_new_program - bind a new RPC program to an existing client 338 * @old - old rpc_client 339 * @program - rpc program to set 340 * @vers - rpc program version 341 * 342 * Clones the rpc client and sets up a new RPC program. This is mainly 343 * of use for enabling different RPC programs to share the same transport. 344 * The Sun NFSv2/v3 ACL protocol can do this. 345 */ 346 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 347 struct rpc_program *program, 348 int vers) 349 { 350 struct rpc_clnt *clnt; 351 struct rpc_version *version; 352 int err; 353 354 BUG_ON(vers >= program->nrvers || !program->version[vers]); 355 version = program->version[vers]; 356 clnt = rpc_clone_client(old); 357 if (IS_ERR(clnt)) 358 goto out; 359 clnt->cl_procinfo = version->procs; 360 clnt->cl_maxproc = version->nrprocs; 361 clnt->cl_protname = program->name; 362 clnt->cl_prog = program->number; 363 clnt->cl_vers = version->number; 364 clnt->cl_stats = program->stats; 365 err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 366 if (err != 0) { 367 rpc_shutdown_client(clnt); 368 clnt = ERR_PTR(err); 369 } 370 out: 371 return clnt; 372 } 373 374 /* 375 * Default callback for async RPC calls 376 */ 377 static void 378 rpc_default_callback(struct rpc_task *task, void *data) 379 { 380 } 381 382 static const struct rpc_call_ops rpc_default_ops = { 383 .rpc_call_done = rpc_default_callback, 384 }; 385 386 /* 387 * Export the signal mask handling for synchronous code that 388 * sleeps on RPC calls 389 */ 390 #define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) 391 392 static void rpc_save_sigmask(sigset_t *oldset, int intr) 393 { 394 unsigned long sigallow = sigmask(SIGKILL); 395 sigset_t sigmask; 396 397 /* Block all signals except those listed in sigallow */ 398 if (intr) 399 sigallow |= RPC_INTR_SIGNALS; 400 siginitsetinv(&sigmask, sigallow); 401 sigprocmask(SIG_BLOCK, &sigmask, oldset); 402 } 403 404 static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset) 405 { 406 rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task)); 407 } 408 409 static inline void rpc_restore_sigmask(sigset_t *oldset) 410 { 411 sigprocmask(SIG_SETMASK, oldset, NULL); 412 } 413 414 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) 415 { 416 rpc_save_sigmask(oldset, clnt->cl_intr); 417 } 418 419 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) 420 { 421 rpc_restore_sigmask(oldset); 422 } 423 424 /* 425 * New rpc_call implementation 426 */ 427 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 428 { 429 struct rpc_task *task; 430 sigset_t oldset; 431 int status; 432 433 /* If this client is slain all further I/O fails */ 434 if (clnt->cl_dead) 435 return -EIO; 436 437 BUG_ON(flags & RPC_TASK_ASYNC); 438 439 status = -ENOMEM; 440 task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); 441 if (task == NULL) 442 goto out; 443 444 /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ 445 rpc_task_sigmask(task, &oldset); 446 447 rpc_call_setup(task, msg, 0); 448 449 /* Set up the call info struct and execute the task */ 450 status = task->tk_status; 451 if (status == 0) { 452 atomic_inc(&task->tk_count); 453 status = rpc_execute(task); 454 if (status == 0) 455 status = task->tk_status; 456 } 457 rpc_restore_sigmask(&oldset); 458 rpc_release_task(task); 459 out: 460 return status; 461 } 462 463 /* 464 * New rpc_call implementation 465 */ 466 int 467 rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, 468 const struct rpc_call_ops *tk_ops, void *data) 469 { 470 struct rpc_task *task; 471 sigset_t oldset; 472 int status; 473 474 /* If this client is slain all further I/O fails */ 475 if (clnt->cl_dead) 476 return -EIO; 477 478 flags |= RPC_TASK_ASYNC; 479 480 /* Create/initialize a new RPC task */ 481 status = -ENOMEM; 482 if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) 483 goto out; 484 485 /* Mask signals on GSS_AUTH upcalls */ 486 rpc_task_sigmask(task, &oldset); 487 488 rpc_call_setup(task, msg, 0); 489 490 /* Set up the call info struct and execute the task */ 491 status = task->tk_status; 492 if (status == 0) 493 rpc_execute(task); 494 else 495 rpc_release_task(task); 496 497 rpc_restore_sigmask(&oldset); 498 out: 499 return status; 500 } 501 502 503 void 504 rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) 505 { 506 task->tk_msg = *msg; 507 task->tk_flags |= flags; 508 /* Bind the user cred */ 509 if (task->tk_msg.rpc_cred != NULL) 510 rpcauth_holdcred(task); 511 else 512 rpcauth_bindcred(task); 513 514 if (task->tk_status == 0) 515 task->tk_action = call_start; 516 else 517 task->tk_action = rpc_exit_task; 518 } 519 520 void 521 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 522 { 523 struct rpc_xprt *xprt = clnt->cl_xprt; 524 if (xprt->ops->set_buffer_size) 525 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 526 } 527 528 /* 529 * Return size of largest payload RPC client can support, in bytes 530 * 531 * For stream transports, this is one RPC record fragment (see RFC 532 * 1831), as we don't support multi-record requests yet. For datagram 533 * transports, this is the size of an IP packet minus the IP, UDP, and 534 * RPC header sizes. 535 */ 536 size_t rpc_max_payload(struct rpc_clnt *clnt) 537 { 538 return clnt->cl_xprt->max_payload; 539 } 540 EXPORT_SYMBOL(rpc_max_payload); 541 542 /** 543 * rpc_force_rebind - force transport to check that remote port is unchanged 544 * @clnt: client to rebind 545 * 546 */ 547 void rpc_force_rebind(struct rpc_clnt *clnt) 548 { 549 if (clnt->cl_autobind) 550 clnt->cl_port = 0; 551 } 552 EXPORT_SYMBOL(rpc_force_rebind); 553 554 /* 555 * Restart an (async) RPC call. Usually called from within the 556 * exit handler. 557 */ 558 void 559 rpc_restart_call(struct rpc_task *task) 560 { 561 if (RPC_ASSASSINATED(task)) 562 return; 563 564 task->tk_action = call_start; 565 } 566 567 /* 568 * 0. Initial state 569 * 570 * Other FSM states can be visited zero or more times, but 571 * this state is visited exactly once for each RPC. 572 */ 573 static void 574 call_start(struct rpc_task *task) 575 { 576 struct rpc_clnt *clnt = task->tk_client; 577 578 dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid, 579 clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc, 580 (RPC_IS_ASYNC(task) ? "async" : "sync")); 581 582 /* Increment call count */ 583 task->tk_msg.rpc_proc->p_count++; 584 clnt->cl_stats->rpccnt++; 585 task->tk_action = call_reserve; 586 } 587 588 /* 589 * 1. Reserve an RPC call slot 590 */ 591 static void 592 call_reserve(struct rpc_task *task) 593 { 594 dprintk("RPC: %4d call_reserve\n", task->tk_pid); 595 596 if (!rpcauth_uptodatecred(task)) { 597 task->tk_action = call_refresh; 598 return; 599 } 600 601 task->tk_status = 0; 602 task->tk_action = call_reserveresult; 603 xprt_reserve(task); 604 } 605 606 /* 607 * 1b. Grok the result of xprt_reserve() 608 */ 609 static void 610 call_reserveresult(struct rpc_task *task) 611 { 612 int status = task->tk_status; 613 614 dprintk("RPC: %4d call_reserveresult (status %d)\n", 615 task->tk_pid, task->tk_status); 616 617 /* 618 * After a call to xprt_reserve(), we must have either 619 * a request slot or else an error status. 620 */ 621 task->tk_status = 0; 622 if (status >= 0) { 623 if (task->tk_rqstp) { 624 task->tk_action = call_allocate; 625 return; 626 } 627 628 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", 629 __FUNCTION__, status); 630 rpc_exit(task, -EIO); 631 return; 632 } 633 634 /* 635 * Even though there was an error, we may have acquired 636 * a request slot somehow. Make sure not to leak it. 637 */ 638 if (task->tk_rqstp) { 639 printk(KERN_ERR "%s: status=%d, request allocated anyway\n", 640 __FUNCTION__, status); 641 xprt_release(task); 642 } 643 644 switch (status) { 645 case -EAGAIN: /* woken up; retry */ 646 task->tk_action = call_reserve; 647 return; 648 case -EIO: /* probably a shutdown */ 649 break; 650 default: 651 printk(KERN_ERR "%s: unrecognized error %d, exiting\n", 652 __FUNCTION__, status); 653 break; 654 } 655 rpc_exit(task, status); 656 } 657 658 /* 659 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. 660 * (Note: buffer memory is freed in xprt_release). 661 */ 662 static void 663 call_allocate(struct rpc_task *task) 664 { 665 struct rpc_rqst *req = task->tk_rqstp; 666 struct rpc_xprt *xprt = task->tk_xprt; 667 unsigned int bufsiz; 668 669 dprintk("RPC: %4d call_allocate (status %d)\n", 670 task->tk_pid, task->tk_status); 671 task->tk_action = call_bind; 672 if (req->rq_buffer) 673 return; 674 675 /* FIXME: compute buffer requirements more exactly using 676 * auth->au_wslack */ 677 bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE; 678 679 if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) 680 return; 681 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); 682 683 if (RPC_IS_ASYNC(task) || !signalled()) { 684 xprt_release(task); 685 task->tk_action = call_reserve; 686 rpc_delay(task, HZ>>4); 687 return; 688 } 689 690 rpc_exit(task, -ERESTARTSYS); 691 } 692 693 static inline int 694 rpc_task_need_encode(struct rpc_task *task) 695 { 696 return task->tk_rqstp->rq_snd_buf.len == 0; 697 } 698 699 static inline void 700 rpc_task_force_reencode(struct rpc_task *task) 701 { 702 task->tk_rqstp->rq_snd_buf.len = 0; 703 } 704 705 /* 706 * 3. Encode arguments of an RPC call 707 */ 708 static void 709 call_encode(struct rpc_task *task) 710 { 711 struct rpc_rqst *req = task->tk_rqstp; 712 struct xdr_buf *sndbuf = &req->rq_snd_buf; 713 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 714 unsigned int bufsiz; 715 kxdrproc_t encode; 716 u32 *p; 717 718 dprintk("RPC: %4d call_encode (status %d)\n", 719 task->tk_pid, task->tk_status); 720 721 /* Default buffer setup */ 722 bufsiz = req->rq_bufsize >> 1; 723 sndbuf->head[0].iov_base = (void *)req->rq_buffer; 724 sndbuf->head[0].iov_len = bufsiz; 725 sndbuf->tail[0].iov_len = 0; 726 sndbuf->page_len = 0; 727 sndbuf->len = 0; 728 sndbuf->buflen = bufsiz; 729 rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz); 730 rcvbuf->head[0].iov_len = bufsiz; 731 rcvbuf->tail[0].iov_len = 0; 732 rcvbuf->page_len = 0; 733 rcvbuf->len = 0; 734 rcvbuf->buflen = bufsiz; 735 736 /* Encode header and provided arguments */ 737 encode = task->tk_msg.rpc_proc->p_encode; 738 if (!(p = call_header(task))) { 739 printk(KERN_INFO "RPC: call_header failed, exit EIO\n"); 740 rpc_exit(task, -EIO); 741 return; 742 } 743 if (encode == NULL) 744 return; 745 746 task->tk_status = rpcauth_wrap_req(task, encode, req, p, 747 task->tk_msg.rpc_argp); 748 if (task->tk_status == -ENOMEM) { 749 /* XXX: Is this sane? */ 750 rpc_delay(task, 3*HZ); 751 task->tk_status = -EAGAIN; 752 } 753 } 754 755 /* 756 * 4. Get the server port number if not yet set 757 */ 758 static void 759 call_bind(struct rpc_task *task) 760 { 761 struct rpc_clnt *clnt = task->tk_client; 762 763 dprintk("RPC: %4d call_bind (status %d)\n", 764 task->tk_pid, task->tk_status); 765 766 task->tk_action = call_connect; 767 if (!clnt->cl_port) { 768 task->tk_action = call_bind_status; 769 task->tk_timeout = task->tk_xprt->bind_timeout; 770 rpc_getport(task, clnt); 771 } 772 } 773 774 /* 775 * 4a. Sort out bind result 776 */ 777 static void 778 call_bind_status(struct rpc_task *task) 779 { 780 int status = -EACCES; 781 782 if (task->tk_status >= 0) { 783 dprintk("RPC: %4d call_bind_status (status %d)\n", 784 task->tk_pid, task->tk_status); 785 task->tk_status = 0; 786 task->tk_action = call_connect; 787 return; 788 } 789 790 switch (task->tk_status) { 791 case -EACCES: 792 dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", 793 task->tk_pid); 794 rpc_delay(task, 3*HZ); 795 goto retry_bind; 796 case -ETIMEDOUT: 797 dprintk("RPC: %4d rpcbind request timed out\n", 798 task->tk_pid); 799 if (RPC_IS_SOFT(task)) { 800 status = -EIO; 801 break; 802 } 803 goto retry_bind; 804 case -EPFNOSUPPORT: 805 dprintk("RPC: %4d remote rpcbind service unavailable\n", 806 task->tk_pid); 807 break; 808 case -EPROTONOSUPPORT: 809 dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", 810 task->tk_pid); 811 break; 812 default: 813 dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", 814 task->tk_pid, -task->tk_status); 815 status = -EIO; 816 break; 817 } 818 819 rpc_exit(task, status); 820 return; 821 822 retry_bind: 823 task->tk_status = 0; 824 task->tk_action = call_bind; 825 return; 826 } 827 828 /* 829 * 4b. Connect to the RPC server 830 */ 831 static void 832 call_connect(struct rpc_task *task) 833 { 834 struct rpc_xprt *xprt = task->tk_xprt; 835 836 dprintk("RPC: %4d call_connect xprt %p %s connected\n", 837 task->tk_pid, xprt, 838 (xprt_connected(xprt) ? "is" : "is not")); 839 840 task->tk_action = call_transmit; 841 if (!xprt_connected(xprt)) { 842 task->tk_action = call_connect_status; 843 if (task->tk_status < 0) 844 return; 845 xprt_connect(task); 846 } 847 } 848 849 /* 850 * 4c. Sort out connect result 851 */ 852 static void 853 call_connect_status(struct rpc_task *task) 854 { 855 struct rpc_clnt *clnt = task->tk_client; 856 int status = task->tk_status; 857 858 dprintk("RPC: %5u call_connect_status (status %d)\n", 859 task->tk_pid, task->tk_status); 860 861 task->tk_status = 0; 862 if (status >= 0) { 863 clnt->cl_stats->netreconn++; 864 task->tk_action = call_transmit; 865 return; 866 } 867 868 /* Something failed: remote service port may have changed */ 869 rpc_force_rebind(clnt); 870 871 switch (status) { 872 case -ENOTCONN: 873 case -ETIMEDOUT: 874 case -EAGAIN: 875 task->tk_action = call_bind; 876 break; 877 default: 878 rpc_exit(task, -EIO); 879 break; 880 } 881 } 882 883 /* 884 * 5. Transmit the RPC request, and wait for reply 885 */ 886 static void 887 call_transmit(struct rpc_task *task) 888 { 889 dprintk("RPC: %4d call_transmit (status %d)\n", 890 task->tk_pid, task->tk_status); 891 892 task->tk_action = call_status; 893 if (task->tk_status < 0) 894 return; 895 task->tk_status = xprt_prepare_transmit(task); 896 if (task->tk_status != 0) 897 return; 898 /* Encode here so that rpcsec_gss can use correct sequence number. */ 899 if (rpc_task_need_encode(task)) { 900 task->tk_rqstp->rq_bytes_sent = 0; 901 call_encode(task); 902 /* Did the encode result in an error condition? */ 903 if (task->tk_status != 0) 904 goto out_nosend; 905 } 906 task->tk_action = call_transmit_status; 907 xprt_transmit(task); 908 if (task->tk_status < 0) 909 return; 910 if (!task->tk_msg.rpc_proc->p_decode) { 911 task->tk_action = rpc_exit_task; 912 rpc_wake_up_task(task); 913 } 914 return; 915 out_nosend: 916 /* release socket write lock before attempting to handle error */ 917 xprt_abort_transmit(task); 918 rpc_task_force_reencode(task); 919 } 920 921 /* 922 * 6. Sort out the RPC call status 923 */ 924 static void 925 call_status(struct rpc_task *task) 926 { 927 struct rpc_clnt *clnt = task->tk_client; 928 struct rpc_rqst *req = task->tk_rqstp; 929 int status; 930 931 if (req->rq_received > 0 && !req->rq_bytes_sent) 932 task->tk_status = req->rq_received; 933 934 dprintk("RPC: %4d call_status (status %d)\n", 935 task->tk_pid, task->tk_status); 936 937 status = task->tk_status; 938 if (status >= 0) { 939 task->tk_action = call_decode; 940 return; 941 } 942 943 task->tk_status = 0; 944 switch(status) { 945 case -ETIMEDOUT: 946 task->tk_action = call_timeout; 947 break; 948 case -ECONNREFUSED: 949 case -ENOTCONN: 950 rpc_force_rebind(clnt); 951 task->tk_action = call_bind; 952 break; 953 case -EAGAIN: 954 task->tk_action = call_transmit; 955 break; 956 case -EIO: 957 /* shutdown or soft timeout */ 958 rpc_exit(task, status); 959 break; 960 default: 961 printk("%s: RPC call returned error %d\n", 962 clnt->cl_protname, -status); 963 rpc_exit(task, status); 964 break; 965 } 966 } 967 968 /* 969 * 6a. Handle transmission errors. 970 */ 971 static void 972 call_transmit_status(struct rpc_task *task) 973 { 974 if (task->tk_status != -EAGAIN) 975 rpc_task_force_reencode(task); 976 call_status(task); 977 } 978 979 /* 980 * 6b. Handle RPC timeout 981 * We do not release the request slot, so we keep using the 982 * same XID for all retransmits. 983 */ 984 static void 985 call_timeout(struct rpc_task *task) 986 { 987 struct rpc_clnt *clnt = task->tk_client; 988 989 if (xprt_adjust_timeout(task->tk_rqstp) == 0) { 990 dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid); 991 goto retry; 992 } 993 994 dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); 995 if (RPC_IS_SOFT(task)) { 996 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 997 clnt->cl_protname, clnt->cl_server); 998 rpc_exit(task, -EIO); 999 return; 1000 } 1001 1002 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 1003 task->tk_flags |= RPC_CALL_MAJORSEEN; 1004 printk(KERN_NOTICE "%s: server %s not responding, still trying\n", 1005 clnt->cl_protname, clnt->cl_server); 1006 } 1007 rpc_force_rebind(clnt); 1008 1009 retry: 1010 clnt->cl_stats->rpcretrans++; 1011 task->tk_action = call_bind; 1012 task->tk_status = 0; 1013 } 1014 1015 /* 1016 * 7. Decode the RPC reply 1017 */ 1018 static void 1019 call_decode(struct rpc_task *task) 1020 { 1021 struct rpc_clnt *clnt = task->tk_client; 1022 struct rpc_rqst *req = task->tk_rqstp; 1023 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1024 u32 *p; 1025 1026 dprintk("RPC: %4d call_decode (status %d)\n", 1027 task->tk_pid, task->tk_status); 1028 1029 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 1030 printk(KERN_NOTICE "%s: server %s OK\n", 1031 clnt->cl_protname, clnt->cl_server); 1032 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 1033 } 1034 1035 if (task->tk_status < 12) { 1036 if (!RPC_IS_SOFT(task)) { 1037 task->tk_action = call_bind; 1038 clnt->cl_stats->rpcretrans++; 1039 goto out_retry; 1040 } 1041 printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n", 1042 clnt->cl_protname, task->tk_status); 1043 rpc_exit(task, -EIO); 1044 return; 1045 } 1046 1047 req->rq_rcv_buf.len = req->rq_private_buf.len; 1048 1049 /* Check that the softirq receive buffer is valid */ 1050 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 1051 sizeof(req->rq_rcv_buf)) != 0); 1052 1053 /* Verify the RPC header */ 1054 p = call_verify(task); 1055 if (IS_ERR(p)) { 1056 if (p == ERR_PTR(-EAGAIN)) 1057 goto out_retry; 1058 return; 1059 } 1060 1061 task->tk_action = rpc_exit_task; 1062 1063 if (decode) 1064 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, 1065 task->tk_msg.rpc_resp); 1066 dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, 1067 task->tk_status); 1068 return; 1069 out_retry: 1070 req->rq_received = req->rq_private_buf.len = 0; 1071 task->tk_status = 0; 1072 } 1073 1074 /* 1075 * 8. Refresh the credentials if rejected by the server 1076 */ 1077 static void 1078 call_refresh(struct rpc_task *task) 1079 { 1080 dprintk("RPC: %4d call_refresh\n", task->tk_pid); 1081 1082 xprt_release(task); /* Must do to obtain new XID */ 1083 task->tk_action = call_refreshresult; 1084 task->tk_status = 0; 1085 task->tk_client->cl_stats->rpcauthrefresh++; 1086 rpcauth_refreshcred(task); 1087 } 1088 1089 /* 1090 * 8a. Process the results of a credential refresh 1091 */ 1092 static void 1093 call_refreshresult(struct rpc_task *task) 1094 { 1095 int status = task->tk_status; 1096 dprintk("RPC: %4d call_refreshresult (status %d)\n", 1097 task->tk_pid, task->tk_status); 1098 1099 task->tk_status = 0; 1100 task->tk_action = call_reserve; 1101 if (status >= 0 && rpcauth_uptodatecred(task)) 1102 return; 1103 if (status == -EACCES) { 1104 rpc_exit(task, -EACCES); 1105 return; 1106 } 1107 task->tk_action = call_refresh; 1108 if (status != -ETIMEDOUT) 1109 rpc_delay(task, 3*HZ); 1110 return; 1111 } 1112 1113 /* 1114 * Call header serialization 1115 */ 1116 static u32 * 1117 call_header(struct rpc_task *task) 1118 { 1119 struct rpc_clnt *clnt = task->tk_client; 1120 struct rpc_rqst *req = task->tk_rqstp; 1121 u32 *p = req->rq_svec[0].iov_base; 1122 1123 /* FIXME: check buffer size? */ 1124 1125 p = xprt_skip_transport_header(task->tk_xprt, p); 1126 *p++ = req->rq_xid; /* XID */ 1127 *p++ = htonl(RPC_CALL); /* CALL */ 1128 *p++ = htonl(RPC_VERSION); /* RPC version */ 1129 *p++ = htonl(clnt->cl_prog); /* program number */ 1130 *p++ = htonl(clnt->cl_vers); /* program version */ 1131 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 1132 p = rpcauth_marshcred(task, p); 1133 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 1134 return p; 1135 } 1136 1137 /* 1138 * Reply header verification 1139 */ 1140 static u32 * 1141 call_verify(struct rpc_task *task) 1142 { 1143 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; 1144 int len = task->tk_rqstp->rq_rcv_buf.len >> 2; 1145 u32 *p = iov->iov_base, n; 1146 int error = -EACCES; 1147 1148 if ((len -= 3) < 0) 1149 goto out_overflow; 1150 p += 1; /* skip XID */ 1151 1152 if ((n = ntohl(*p++)) != RPC_REPLY) { 1153 printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); 1154 goto out_garbage; 1155 } 1156 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { 1157 if (--len < 0) 1158 goto out_overflow; 1159 switch ((n = ntohl(*p++))) { 1160 case RPC_AUTH_ERROR: 1161 break; 1162 case RPC_MISMATCH: 1163 dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); 1164 error = -EPROTONOSUPPORT; 1165 goto out_err; 1166 default: 1167 dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); 1168 goto out_eio; 1169 } 1170 if (--len < 0) 1171 goto out_overflow; 1172 switch ((n = ntohl(*p++))) { 1173 case RPC_AUTH_REJECTEDCRED: 1174 case RPC_AUTH_REJECTEDVERF: 1175 case RPCSEC_GSS_CREDPROBLEM: 1176 case RPCSEC_GSS_CTXPROBLEM: 1177 if (!task->tk_cred_retry) 1178 break; 1179 task->tk_cred_retry--; 1180 dprintk("RPC: %4d call_verify: retry stale creds\n", 1181 task->tk_pid); 1182 rpcauth_invalcred(task); 1183 task->tk_action = call_refresh; 1184 goto out_retry; 1185 case RPC_AUTH_BADCRED: 1186 case RPC_AUTH_BADVERF: 1187 /* possibly garbled cred/verf? */ 1188 if (!task->tk_garb_retry) 1189 break; 1190 task->tk_garb_retry--; 1191 dprintk("RPC: %4d call_verify: retry garbled creds\n", 1192 task->tk_pid); 1193 task->tk_action = call_bind; 1194 goto out_retry; 1195 case RPC_AUTH_TOOWEAK: 1196 printk(KERN_NOTICE "call_verify: server requires stronger " 1197 "authentication.\n"); 1198 break; 1199 default: 1200 printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); 1201 error = -EIO; 1202 } 1203 dprintk("RPC: %4d call_verify: call rejected %d\n", 1204 task->tk_pid, n); 1205 goto out_err; 1206 } 1207 if (!(p = rpcauth_checkverf(task, p))) { 1208 printk(KERN_WARNING "call_verify: auth check failed\n"); 1209 goto out_garbage; /* bad verifier, retry */ 1210 } 1211 len = p - (u32 *)iov->iov_base - 1; 1212 if (len < 0) 1213 goto out_overflow; 1214 switch ((n = ntohl(*p++))) { 1215 case RPC_SUCCESS: 1216 return p; 1217 case RPC_PROG_UNAVAIL: 1218 dprintk("RPC: call_verify: program %u is unsupported by server %s\n", 1219 (unsigned int)task->tk_client->cl_prog, 1220 task->tk_client->cl_server); 1221 error = -EPFNOSUPPORT; 1222 goto out_err; 1223 case RPC_PROG_MISMATCH: 1224 dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", 1225 (unsigned int)task->tk_client->cl_prog, 1226 (unsigned int)task->tk_client->cl_vers, 1227 task->tk_client->cl_server); 1228 error = -EPROTONOSUPPORT; 1229 goto out_err; 1230 case RPC_PROC_UNAVAIL: 1231 dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", 1232 task->tk_msg.rpc_proc, 1233 task->tk_client->cl_prog, 1234 task->tk_client->cl_vers, 1235 task->tk_client->cl_server); 1236 error = -EOPNOTSUPP; 1237 goto out_err; 1238 case RPC_GARBAGE_ARGS: 1239 dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); 1240 break; /* retry */ 1241 default: 1242 printk(KERN_WARNING "call_verify: server accept status: %x\n", n); 1243 /* Also retry */ 1244 } 1245 1246 out_garbage: 1247 task->tk_client->cl_stats->rpcgarbage++; 1248 if (task->tk_garb_retry) { 1249 task->tk_garb_retry--; 1250 dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); 1251 task->tk_action = call_bind; 1252 out_retry: 1253 return ERR_PTR(-EAGAIN); 1254 } 1255 printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); 1256 out_eio: 1257 error = -EIO; 1258 out_err: 1259 rpc_exit(task, error); 1260 return ERR_PTR(error); 1261 out_overflow: 1262 printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); 1263 goto out_garbage; 1264 } 1265 1266 static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) 1267 { 1268 return 0; 1269 } 1270 1271 static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) 1272 { 1273 return 0; 1274 } 1275 1276 static struct rpc_procinfo rpcproc_null = { 1277 .p_encode = rpcproc_encode_null, 1278 .p_decode = rpcproc_decode_null, 1279 }; 1280 1281 int rpc_ping(struct rpc_clnt *clnt, int flags) 1282 { 1283 struct rpc_message msg = { 1284 .rpc_proc = &rpcproc_null, 1285 }; 1286 int err; 1287 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); 1288 err = rpc_call_sync(clnt, &msg, flags); 1289 put_rpccred(msg.rpc_cred); 1290 return err; 1291 } 1292