/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 * <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_NFS_V4_1
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

#include "sunrpc.h"

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */
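/*
 * For illustration only: once sunrpc_table below is registered, these
 * tunables show up as, e.g., /proc/sys/sunrpc/udp_slot_table_entries,
 * /proc/sys/sunrpc/min_resvport and /proc/sys/sunrpc/tcp_fin_timeout.
 */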

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static ctl_table xs_tunables_table[] = {
	{
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },
};

static ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },
};

#endif

/*
 * Time out for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO		(5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO		(60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
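/*
 * Illustrative numbers, not normative: with the values above, a client
 * whose connection keeps failing backs off 3s, 6s, 12s, 24s, ... between
 * reconnect attempts (the doubling happens in xs_connect() below), and
 * the delay is capped at XS_TCP_MAX_REEST_TO = 5 minutes.
 */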
/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

struct sock_xprt {
	struct rpc_xprt		xprt;

	/*
	 * Network layer
	 */
	struct socket *		sock;
	struct sock *		inet;

	/*
	 * State of TCP reply receive
	 */
	__be32			tcp_fraghdr,
				tcp_xid;

	u32			tcp_offset,
				tcp_reclen;

	unsigned long		tcp_copied,
				tcp_flags;

	/*
	 * Connection of transports
	 */
	struct delayed_work	connect_worker;
	struct sockaddr_storage	srcaddr;
	unsigned short		srcport;

	/*
	 * UDP socket buffer size parameters
	 */
	size_t			rcvsize,
				sndsize;

	/*
	 * Saved socket callback addresses
	 */
	void			(*old_data_ready)(struct sock *, int);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
	void			(*old_error_report)(struct sock *);
};

/*
 * TCP receive state flags
 */
#define TCP_RCV_LAST_FRAG	(1UL << 0)
#define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
#define TCP_RCV_COPY_XID	(1UL << 2)
#define TCP_RCV_COPY_DATA	(1UL << 3)
#define TCP_RCV_READ_CALLDIR	(1UL << 4)
#define TCP_RCV_COPY_CALLDIR	(1UL << 5)

/*
 * TCP RPC flags
 */
#define TCP_RPC_REPLY		(1UL << 6)

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	char buf[128];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	switch (sap->sa_family) {
	case AF_INET:
		sin = xs_addr_in(xprt);
		(void)snprintf(buf, sizeof(buf), "%08x",
				ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		sin6 = xs_addr_in6(xprt);
		(void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
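/*
 * Example (illustrative values): for an IPv4 peer 192.168.0.10, port
 * 2049, the strings built above would be RPC_DISPLAY_ADDR "192.168.0.10",
 * RPC_DISPLAY_HEX_ADDR "c0a8000a", RPC_DISPLAY_PORT "2049" and
 * RPC_DISPLAY_HEX_PORT " 801" (note that "%4hx" pads to four characters).
 */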
static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static int xs_send_kvec(struct socket *sock, struct sockaddr *addr,
			int addrlen, struct kvec *vec, unsigned int base,
			int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr,
			    unsigned int base, int more)
{
	struct page **ppage;
	unsigned int remainder;
	int err, sent = 0;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	for (;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		sent += err;
		ppage++;
		base = 0;
	}
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr,
			int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	unsigned int remainder = xdr->len - base;
	int err, sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return sent;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}
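/*
 * Note on the return convention above (a summary, not new behaviour):
 * xs_sendpages() returns the number of bytes sent, or a negative errno
 * if nothing could be sent.  A short count simply means the caller must
 * retry later from req->rq_bytes_sent; the destination address is only
 * passed on the first fragment of a UDP datagram (base == 0).
 */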
static void xs_nospace_callback(struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);

	transport->inet->sk_write_pending--;
	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = 0;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
			ret = -EAGAIN;
			/*
			 * Notify TCP that we're limited by the application
			 * window size
			 */
			set_bit(SOCK_NOSPACE, &transport->sock->flags);
			transport->inet->sk_write_pending++;
			/* ...and wait for more buffer space */
			xprt_wait_for_buffer_space(task, xs_nospace_callback);
		}
	} else {
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
		ret = -ENOTCONN;
	}

	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;
	status = xs_sendpages(transport->sock,
			      xs_addr(xprt),
			      xprt->addrlen, xdr,
			      req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (status >= 0) {
		task->tk_bytes_sent += status;
		if (status >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}
	if (!transport->sock)
		goto out;

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}
out:
	return status;
}

/**
 * xs_tcp_shutdown - gracefully shut down a TCP socket
 * @xprt: transport
 *
 * Initiates a graceful shutdown of the TCP socket by calling the
 * equivalent of shutdown(SHUT_WR);
 */
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;

	if (sock != NULL)
		kernel_sock_shutdown(sock, SHUT_WR);
}

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}
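/*
 * Worked example (illustrative): for a send buffer 104 bytes long
 * (4-byte record marker plus 100 bytes of RPC message), reclen = 100,
 * and the marker written at the head of the stream is
 * htonl(0x80000000 | 100) == htonl(0x80000064) -- the top bit
 * (RPC_LAST_STREAM_FRAGMENT) says this is the final record fragment.
 */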
688 */ 689 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 690 { 691 struct rpc_rqst *req; 692 693 if (task != xprt->snd_task) 694 return; 695 if (task == NULL) 696 goto out_release; 697 req = task->tk_rqstp; 698 if (req->rq_bytes_sent == 0) 699 goto out_release; 700 if (req->rq_bytes_sent == req->rq_snd_buf.len) 701 goto out_release; 702 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state); 703 out_release: 704 xprt_release_xprt(xprt, task); 705 } 706 707 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) 708 { 709 transport->old_data_ready = sk->sk_data_ready; 710 transport->old_state_change = sk->sk_state_change; 711 transport->old_write_space = sk->sk_write_space; 712 transport->old_error_report = sk->sk_error_report; 713 } 714 715 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) 716 { 717 sk->sk_data_ready = transport->old_data_ready; 718 sk->sk_state_change = transport->old_state_change; 719 sk->sk_write_space = transport->old_write_space; 720 sk->sk_error_report = transport->old_error_report; 721 } 722 723 static void xs_reset_transport(struct sock_xprt *transport) 724 { 725 struct socket *sock = transport->sock; 726 struct sock *sk = transport->inet; 727 728 if (sk == NULL) 729 return; 730 731 write_lock_bh(&sk->sk_callback_lock); 732 transport->inet = NULL; 733 transport->sock = NULL; 734 735 sk->sk_user_data = NULL; 736 737 xs_restore_old_callbacks(transport, sk); 738 write_unlock_bh(&sk->sk_callback_lock); 739 740 sk->sk_no_check = 0; 741 742 sock_release(sock); 743 } 744 745 /** 746 * xs_close - close a socket 747 * @xprt: transport 748 * 749 * This is used when all requests are complete; ie, no DRC state remains 750 * on the server we want to save. 751 * 752 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with 753 * xs_reset_transport() zeroing the socket from underneath a writer. 
754 */ 755 static void xs_close(struct rpc_xprt *xprt) 756 { 757 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 758 759 dprintk("RPC: xs_close xprt %p\n", xprt); 760 761 xs_reset_transport(transport); 762 xprt->reestablish_timeout = 0; 763 764 smp_mb__before_clear_bit(); 765 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 766 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 767 clear_bit(XPRT_CLOSING, &xprt->state); 768 smp_mb__after_clear_bit(); 769 xprt_disconnect_done(xprt); 770 } 771 772 static void xs_tcp_close(struct rpc_xprt *xprt) 773 { 774 if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state)) 775 xs_close(xprt); 776 else 777 xs_tcp_shutdown(xprt); 778 } 779 780 /** 781 * xs_destroy - prepare to shutdown a transport 782 * @xprt: doomed transport 783 * 784 */ 785 static void xs_destroy(struct rpc_xprt *xprt) 786 { 787 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 788 789 dprintk("RPC: xs_destroy xprt %p\n", xprt); 790 791 cancel_rearming_delayed_work(&transport->connect_worker); 792 793 xs_close(xprt); 794 xs_free_peer_addresses(xprt); 795 kfree(xprt->slot); 796 kfree(xprt); 797 module_put(THIS_MODULE); 798 } 799 800 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) 801 { 802 return (struct rpc_xprt *) sk->sk_user_data; 803 } 804 805 /** 806 * xs_udp_data_ready - "data ready" callback for UDP sockets 807 * @sk: socket with data to read 808 * @len: how much data to read 809 * 810 */ 811 static void xs_udp_data_ready(struct sock *sk, int len) 812 { 813 struct rpc_task *task; 814 struct rpc_xprt *xprt; 815 struct rpc_rqst *rovr; 816 struct sk_buff *skb; 817 int err, repsize, copied; 818 u32 _xid; 819 __be32 *xp; 820 821 read_lock(&sk->sk_callback_lock); 822 dprintk("RPC: xs_udp_data_ready...\n"); 823 if (!(xprt = xprt_from_sock(sk))) 824 goto out; 825 826 if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) 827 goto out; 828 829 if (xprt->shutdown) 830 goto dropit; 831 832 repsize = skb->len - sizeof(struct udphdr); 833 if (repsize < 4) { 834 dprintk("RPC: impossible RPC reply size %d!\n", repsize); 835 goto dropit; 836 } 837 838 /* Copy the XID from the skb... */ 839 xp = skb_header_pointer(skb, sizeof(struct udphdr), 840 sizeof(_xid), &_xid); 841 if (xp == NULL) 842 goto dropit; 843 844 /* Look up and lock the request corresponding to the given XID */ 845 spin_lock(&xprt->transport_lock); 846 rovr = xprt_lookup_rqst(xprt, *xp); 847 if (!rovr) 848 goto out_unlock; 849 task = rovr->rq_task; 850 851 if ((copied = rovr->rq_private_buf.buflen) > repsize) 852 copied = repsize; 853 854 /* Suck it into the iovec, verify checksum if not done by hw. */ 855 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { 856 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS); 857 goto out_unlock; 858 } 859 860 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 861 862 /* Something worked... 
	dst_confirm(skb_dst(skb));

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 8)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_force_disconnect(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
	transport->tcp_copied = 4;
	dprintk("RPC: reading %s XID %08x\n",
			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
							      : "request with",
			ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
				       struct xdr_skb_reader *desc)
{
	size_t len, used;
	u32 offset;
	__be32 calldir;

	/*
	 * We want transport->tcp_offset to be 8 at the end of this routine
	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
	 * When this function is called for the first time,
	 * transport->tcp_offset is 4 (after having already read the xid).
	 */
957 */ 958 offset = transport->tcp_offset - sizeof(transport->tcp_xid); 959 len = sizeof(calldir) - offset; 960 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len); 961 used = xdr_skb_read_bits(desc, &calldir, len); 962 transport->tcp_offset += used; 963 if (used != len) 964 return; 965 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR; 966 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; 967 transport->tcp_flags |= TCP_RCV_COPY_DATA; 968 /* 969 * We don't yet have the XDR buffer, so we will write the calldir 970 * out after we get the buffer from the 'struct rpc_rqst' 971 */ 972 if (ntohl(calldir) == RPC_REPLY) 973 transport->tcp_flags |= TCP_RPC_REPLY; 974 else 975 transport->tcp_flags &= ~TCP_RPC_REPLY; 976 dprintk("RPC: reading %s CALL/REPLY flag %08x\n", 977 (transport->tcp_flags & TCP_RPC_REPLY) ? 978 "reply for" : "request with", calldir); 979 xs_tcp_check_fraghdr(transport); 980 } 981 982 static inline void xs_tcp_read_common(struct rpc_xprt *xprt, 983 struct xdr_skb_reader *desc, 984 struct rpc_rqst *req) 985 { 986 struct sock_xprt *transport = 987 container_of(xprt, struct sock_xprt, xprt); 988 struct xdr_buf *rcvbuf; 989 size_t len; 990 ssize_t r; 991 992 rcvbuf = &req->rq_private_buf; 993 994 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) { 995 /* 996 * Save the RPC direction in the XDR buffer 997 */ 998 __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ? 999 htonl(RPC_REPLY) : 0; 1000 1001 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied, 1002 &calldir, sizeof(calldir)); 1003 transport->tcp_copied += sizeof(calldir); 1004 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR; 1005 } 1006 1007 len = desc->count; 1008 if (len > transport->tcp_reclen - transport->tcp_offset) { 1009 struct xdr_skb_reader my_desc; 1010 1011 len = transport->tcp_reclen - transport->tcp_offset; 1012 memcpy(&my_desc, desc, sizeof(my_desc)); 1013 my_desc.count = len; 1014 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, 1015 &my_desc, xdr_skb_read_bits); 1016 desc->count -= r; 1017 desc->offset += r; 1018 } else 1019 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, 1020 desc, xdr_skb_read_bits); 1021 1022 if (r > 0) { 1023 transport->tcp_copied += r; 1024 transport->tcp_offset += r; 1025 } 1026 if (r != len) { 1027 /* Error when copying to the receive buffer, 1028 * usually because we weren't able to allocate 1029 * additional buffer pages. All we can do now 1030 * is turn off TCP_RCV_COPY_DATA, so the request 1031 * will not receive any additional updates, 1032 * and time out. 1033 * Any remaining data from this record will 1034 * be discarded. 
1035 */ 1036 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1037 dprintk("RPC: XID %08x truncated request\n", 1038 ntohl(transport->tcp_xid)); 1039 dprintk("RPC: xprt = %p, tcp_copied = %lu, " 1040 "tcp_offset = %u, tcp_reclen = %u\n", 1041 xprt, transport->tcp_copied, 1042 transport->tcp_offset, transport->tcp_reclen); 1043 return; 1044 } 1045 1046 dprintk("RPC: XID %08x read %Zd bytes\n", 1047 ntohl(transport->tcp_xid), r); 1048 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " 1049 "tcp_reclen = %u\n", xprt, transport->tcp_copied, 1050 transport->tcp_offset, transport->tcp_reclen); 1051 1052 if (transport->tcp_copied == req->rq_private_buf.buflen) 1053 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1054 else if (transport->tcp_offset == transport->tcp_reclen) { 1055 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) 1056 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1057 } 1058 1059 return; 1060 } 1061 1062 /* 1063 * Finds the request corresponding to the RPC xid and invokes the common 1064 * tcp read code to read the data. 1065 */ 1066 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt, 1067 struct xdr_skb_reader *desc) 1068 { 1069 struct sock_xprt *transport = 1070 container_of(xprt, struct sock_xprt, xprt); 1071 struct rpc_rqst *req; 1072 1073 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid)); 1074 1075 /* Find and lock the request corresponding to this xid */ 1076 spin_lock(&xprt->transport_lock); 1077 req = xprt_lookup_rqst(xprt, transport->tcp_xid); 1078 if (!req) { 1079 dprintk("RPC: XID %08x request not found!\n", 1080 ntohl(transport->tcp_xid)); 1081 spin_unlock(&xprt->transport_lock); 1082 return -1; 1083 } 1084 1085 xs_tcp_read_common(xprt, desc, req); 1086 1087 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) 1088 xprt_complete_rqst(req->rq_task, transport->tcp_copied); 1089 1090 spin_unlock(&xprt->transport_lock); 1091 return 0; 1092 } 1093 1094 #if defined(CONFIG_NFS_V4_1) 1095 /* 1096 * Obtains an rpc_rqst previously allocated and invokes the common 1097 * tcp read code to read the data. The result is placed in the callback 1098 * queue. 1099 * If we're unable to obtain the rpc_rqst we schedule the closing of the 1100 * connection and return -1. 1101 */ 1102 static inline int xs_tcp_read_callback(struct rpc_xprt *xprt, 1103 struct xdr_skb_reader *desc) 1104 { 1105 struct sock_xprt *transport = 1106 container_of(xprt, struct sock_xprt, xprt); 1107 struct rpc_rqst *req; 1108 1109 req = xprt_alloc_bc_request(xprt); 1110 if (req == NULL) { 1111 printk(KERN_WARNING "Callback slot table overflowed\n"); 1112 xprt_force_disconnect(xprt); 1113 return -1; 1114 } 1115 1116 req->rq_xid = transport->tcp_xid; 1117 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid)); 1118 xs_tcp_read_common(xprt, desc, req); 1119 1120 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) { 1121 struct svc_serv *bc_serv = xprt->bc_serv; 1122 1123 /* 1124 * Add callback request to callback list. The callback 1125 * service sleeps on the sv_cb_waitq waiting for new 1126 * requests. Wake it up after adding enqueing the 1127 * request. 
1128 */ 1129 dprintk("RPC: add callback request to list\n"); 1130 spin_lock(&bc_serv->sv_cb_lock); 1131 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); 1132 spin_unlock(&bc_serv->sv_cb_lock); 1133 wake_up(&bc_serv->sv_cb_waitq); 1134 } 1135 1136 req->rq_private_buf.len = transport->tcp_copied; 1137 1138 return 0; 1139 } 1140 1141 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, 1142 struct xdr_skb_reader *desc) 1143 { 1144 struct sock_xprt *transport = 1145 container_of(xprt, struct sock_xprt, xprt); 1146 1147 return (transport->tcp_flags & TCP_RPC_REPLY) ? 1148 xs_tcp_read_reply(xprt, desc) : 1149 xs_tcp_read_callback(xprt, desc); 1150 } 1151 #else 1152 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt, 1153 struct xdr_skb_reader *desc) 1154 { 1155 return xs_tcp_read_reply(xprt, desc); 1156 } 1157 #endif /* CONFIG_NFS_V4_1 */ 1158 1159 /* 1160 * Read data off the transport. This can be either an RPC_CALL or an 1161 * RPC_REPLY. Relay the processing to helper functions. 1162 */ 1163 static void xs_tcp_read_data(struct rpc_xprt *xprt, 1164 struct xdr_skb_reader *desc) 1165 { 1166 struct sock_xprt *transport = 1167 container_of(xprt, struct sock_xprt, xprt); 1168 1169 if (_xs_tcp_read_data(xprt, desc) == 0) 1170 xs_tcp_check_fraghdr(transport); 1171 else { 1172 /* 1173 * The transport_lock protects the request handling. 1174 * There's no need to hold it to update the tcp_flags. 1175 */ 1176 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1177 } 1178 } 1179 1180 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc) 1181 { 1182 size_t len; 1183 1184 len = transport->tcp_reclen - transport->tcp_offset; 1185 if (len > desc->count) 1186 len = desc->count; 1187 desc->count -= len; 1188 desc->offset += len; 1189 transport->tcp_offset += len; 1190 dprintk("RPC: discarded %Zu bytes\n", len); 1191 xs_tcp_check_fraghdr(transport); 1192 } 1193 1194 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) 1195 { 1196 struct rpc_xprt *xprt = rd_desc->arg.data; 1197 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1198 struct xdr_skb_reader desc = { 1199 .skb = skb, 1200 .offset = offset, 1201 .count = len, 1202 }; 1203 1204 dprintk("RPC: xs_tcp_data_recv started\n"); 1205 do { 1206 /* Read in a new fragment marker if necessary */ 1207 /* Can we ever really expect to get completely empty fragments? 
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
			xs_tcp_read_xid(transport, &desc);
			continue;
		}
		/* Read in the call/reply flag */
		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
			xs_tcp_read_calldir(transport, &desc);
			continue;
		}
		/* Read in the request data */
		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
			xs_tcp_read_data(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;
	int read;

	dprintk("RPC: xs_tcp_data_ready...\n");

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* Any data means we had a useful conversation, so
	 * we don't need to delay the next reconnect
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	do {
		rd_desc.count = 65536;
		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
	} while (read > 0);
out:
	read_unlock(&sk->sk_callback_lock);
}

/*
 * Do the equivalent of linger/linger2 handling for dealing with
 * broken servers that don't close the socket in a timely
 * fashion
 */
static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
		unsigned long timeout)
{
	struct sock_xprt *transport;

	if (xprt_test_and_set_connecting(xprt))
		return;
	set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	transport = container_of(xprt, struct sock_xprt, xprt);
	queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
			   timeout);
}

static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport;

	transport = container_of(xprt, struct sock_xprt, xprt);

	if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
	    !cancel_delayed_work(&transport->connect_worker))
		return;
	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	xprt_clear_connecting(xprt);
}

static void xs_sock_mark_closed(struct rpc_xprt *xprt)
{
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_clear_bit();
	/* Mark transport as closed and wake up all pending tasks */
	xprt_disconnect_done(xprt);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			struct sock_xprt *transport = container_of(xprt,
					struct sock_xprt, xprt);

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;

			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_clear_bit();
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt_force_disconnect(xprt);
	case TCP_SYN_SENT:
		xprt->connect_cookie++;
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_clear_bit();
		break;
	case TCP_CLOSE:
		xs_tcp_cancel_linger_timeout(xprt);
		xs_sock_mark_closed(xprt);
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_error_report - callback mainly for catching socket errors
 * @sk: socket
 */
static void xs_error_report(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: %s client %p...\n"
		"RPC: error %d\n",
			__func__, xprt, sk->sk_err);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	read_unlock(&sk->sk_callback_lock);
}

static void xs_write_space(struct sock *sk)
{
	struct socket *sock;
	struct rpc_xprt *xprt;

	if (unlikely(!(sock = sk->sk_socket)))
		return;
	clear_bit(SOCK_NOSPACE, &sock->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
		return;

	xprt_write_space(xprt);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
1431 */ 1432 static void xs_udp_write_space(struct sock *sk) 1433 { 1434 read_lock(&sk->sk_callback_lock); 1435 1436 /* from net/core/sock.c:sock_def_write_space */ 1437 if (sock_writeable(sk)) 1438 xs_write_space(sk); 1439 1440 read_unlock(&sk->sk_callback_lock); 1441 } 1442 1443 /** 1444 * xs_tcp_write_space - callback invoked when socket buffer space 1445 * becomes available 1446 * @sk: socket whose state has changed 1447 * 1448 * Called when more output buffer space is available for this socket. 1449 * We try not to wake our writers until they can make "significant" 1450 * progress, otherwise we'll waste resources thrashing kernel_sendmsg 1451 * with a bunch of small requests. 1452 */ 1453 static void xs_tcp_write_space(struct sock *sk) 1454 { 1455 read_lock(&sk->sk_callback_lock); 1456 1457 /* from net/core/stream.c:sk_stream_write_space */ 1458 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1459 xs_write_space(sk); 1460 1461 read_unlock(&sk->sk_callback_lock); 1462 } 1463 1464 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) 1465 { 1466 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1467 struct sock *sk = transport->inet; 1468 1469 if (transport->rcvsize) { 1470 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 1471 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; 1472 } 1473 if (transport->sndsize) { 1474 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 1475 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; 1476 sk->sk_write_space(sk); 1477 } 1478 } 1479 1480 /** 1481 * xs_udp_set_buffer_size - set send and receive limits 1482 * @xprt: generic transport 1483 * @sndsize: requested size of send buffer, in bytes 1484 * @rcvsize: requested size of receive buffer, in bytes 1485 * 1486 * Set socket send and receive buffer size limits. 1487 */ 1488 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) 1489 { 1490 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1491 1492 transport->sndsize = 0; 1493 if (sndsize) 1494 transport->sndsize = sndsize + 1024; 1495 transport->rcvsize = 0; 1496 if (rcvsize) 1497 transport->rcvsize = rcvsize + 1024; 1498 1499 xs_udp_do_set_buffer_size(xprt); 1500 } 1501 1502 /** 1503 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport 1504 * @task: task that timed out 1505 * 1506 * Adjust the congestion window after a retransmit timeout has occurred. 
1507 */ 1508 static void xs_udp_timer(struct rpc_task *task) 1509 { 1510 xprt_adjust_cwnd(task, -ETIMEDOUT); 1511 } 1512 1513 static unsigned short xs_get_random_port(void) 1514 { 1515 unsigned short range = xprt_max_resvport - xprt_min_resvport; 1516 unsigned short rand = (unsigned short) net_random() % range; 1517 return rand + xprt_min_resvport; 1518 } 1519 1520 /** 1521 * xs_set_port - reset the port number in the remote endpoint address 1522 * @xprt: generic transport 1523 * @port: new port number 1524 * 1525 */ 1526 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) 1527 { 1528 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); 1529 1530 rpc_set_port(xs_addr(xprt), port); 1531 xs_update_peer_port(xprt); 1532 } 1533 1534 static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock) 1535 { 1536 unsigned short port = transport->srcport; 1537 1538 if (port == 0 && transport->xprt.resvport) 1539 port = xs_get_random_port(); 1540 return port; 1541 } 1542 1543 static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port) 1544 { 1545 if (transport->srcport != 0) 1546 transport->srcport = 0; 1547 if (!transport->xprt.resvport) 1548 return 0; 1549 if (port <= xprt_min_resvport || port > xprt_max_resvport) 1550 return xprt_max_resvport; 1551 return --port; 1552 } 1553 1554 static int xs_bind4(struct sock_xprt *transport, struct socket *sock) 1555 { 1556 struct sockaddr_in myaddr = { 1557 .sin_family = AF_INET, 1558 }; 1559 struct sockaddr_in *sa; 1560 int err, nloop = 0; 1561 unsigned short port = xs_get_srcport(transport, sock); 1562 unsigned short last; 1563 1564 sa = (struct sockaddr_in *)&transport->srcaddr; 1565 myaddr.sin_addr = sa->sin_addr; 1566 do { 1567 myaddr.sin_port = htons(port); 1568 err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1569 sizeof(myaddr)); 1570 if (port == 0) 1571 break; 1572 if (err == 0) { 1573 transport->srcport = port; 1574 break; 1575 } 1576 last = port; 1577 port = xs_next_srcport(transport, sock, port); 1578 if (port > last) 1579 nloop++; 1580 } while (err == -EADDRINUSE && nloop != 2); 1581 dprintk("RPC: %s %pI4:%u: %s (%d)\n", 1582 __func__, &myaddr.sin_addr, 1583 port, err ? "failed" : "ok", err); 1584 return err; 1585 } 1586 1587 static int xs_bind6(struct sock_xprt *transport, struct socket *sock) 1588 { 1589 struct sockaddr_in6 myaddr = { 1590 .sin6_family = AF_INET6, 1591 }; 1592 struct sockaddr_in6 *sa; 1593 int err, nloop = 0; 1594 unsigned short port = xs_get_srcport(transport, sock); 1595 unsigned short last; 1596 1597 sa = (struct sockaddr_in6 *)&transport->srcaddr; 1598 myaddr.sin6_addr = sa->sin6_addr; 1599 do { 1600 myaddr.sin6_port = htons(port); 1601 err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1602 sizeof(myaddr)); 1603 if (port == 0) 1604 break; 1605 if (err == 0) { 1606 transport->srcport = port; 1607 break; 1608 } 1609 last = port; 1610 port = xs_next_srcport(transport, sock, port); 1611 if (port > last) 1612 nloop++; 1613 } while (err == -EADDRINUSE && nloop != 2); 1614 dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n", 1615 &myaddr.sin6_addr, port, err ? 
"failed" : "ok", err); 1616 return err; 1617 } 1618 1619 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1620 static struct lock_class_key xs_key[2]; 1621 static struct lock_class_key xs_slock_key[2]; 1622 1623 static inline void xs_reclassify_socket4(struct socket *sock) 1624 { 1625 struct sock *sk = sock->sk; 1626 1627 BUG_ON(sock_owned_by_user(sk)); 1628 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1629 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1630 } 1631 1632 static inline void xs_reclassify_socket6(struct socket *sock) 1633 { 1634 struct sock *sk = sock->sk; 1635 1636 BUG_ON(sock_owned_by_user(sk)); 1637 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1638 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1639 } 1640 #else 1641 static inline void xs_reclassify_socket4(struct socket *sock) 1642 { 1643 } 1644 1645 static inline void xs_reclassify_socket6(struct socket *sock) 1646 { 1647 } 1648 #endif 1649 1650 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1651 { 1652 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1653 1654 if (!transport->inet) { 1655 struct sock *sk = sock->sk; 1656 1657 write_lock_bh(&sk->sk_callback_lock); 1658 1659 xs_save_old_callbacks(transport, sk); 1660 1661 sk->sk_user_data = xprt; 1662 sk->sk_data_ready = xs_udp_data_ready; 1663 sk->sk_write_space = xs_udp_write_space; 1664 sk->sk_error_report = xs_error_report; 1665 sk->sk_no_check = UDP_CSUM_NORCV; 1666 sk->sk_allocation = GFP_ATOMIC; 1667 1668 xprt_set_connected(xprt); 1669 1670 /* Reset to new socket */ 1671 transport->sock = sock; 1672 transport->inet = sk; 1673 1674 write_unlock_bh(&sk->sk_callback_lock); 1675 } 1676 xs_udp_do_set_buffer_size(xprt); 1677 } 1678 1679 /** 1680 * xs_udp_connect_worker4 - set up a UDP socket 1681 * @work: RPC transport to connect 1682 * 1683 * Invoked by a work queue tasklet. 1684 */ 1685 static void xs_udp_connect_worker4(struct work_struct *work) 1686 { 1687 struct sock_xprt *transport = 1688 container_of(work, struct sock_xprt, connect_worker.work); 1689 struct rpc_xprt *xprt = &transport->xprt; 1690 struct socket *sock = transport->sock; 1691 int err, status = -EIO; 1692 1693 if (xprt->shutdown) 1694 goto out; 1695 1696 /* Start by resetting any existing state */ 1697 xs_reset_transport(transport); 1698 1699 err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); 1700 if (err < 0) { 1701 dprintk("RPC: can't create UDP transport socket (%d).\n", -err); 1702 goto out; 1703 } 1704 xs_reclassify_socket4(sock); 1705 1706 if (xs_bind4(transport, sock)) { 1707 sock_release(sock); 1708 goto out; 1709 } 1710 1711 dprintk("RPC: worker connecting xprt %p via %s to " 1712 "%s (port %s)\n", xprt, 1713 xprt->address_strings[RPC_DISPLAY_PROTO], 1714 xprt->address_strings[RPC_DISPLAY_ADDR], 1715 xprt->address_strings[RPC_DISPLAY_PORT]); 1716 1717 xs_udp_finish_connecting(xprt, sock); 1718 status = 0; 1719 out: 1720 xprt_clear_connecting(xprt); 1721 xprt_wake_pending_tasks(xprt, status); 1722 } 1723 1724 /** 1725 * xs_udp_connect_worker6 - set up a UDP socket 1726 * @work: RPC transport to connect 1727 * 1728 * Invoked by a work queue tasklet. 
1729 */ 1730 static void xs_udp_connect_worker6(struct work_struct *work) 1731 { 1732 struct sock_xprt *transport = 1733 container_of(work, struct sock_xprt, connect_worker.work); 1734 struct rpc_xprt *xprt = &transport->xprt; 1735 struct socket *sock = transport->sock; 1736 int err, status = -EIO; 1737 1738 if (xprt->shutdown) 1739 goto out; 1740 1741 /* Start by resetting any existing state */ 1742 xs_reset_transport(transport); 1743 1744 err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock); 1745 if (err < 0) { 1746 dprintk("RPC: can't create UDP transport socket (%d).\n", -err); 1747 goto out; 1748 } 1749 xs_reclassify_socket6(sock); 1750 1751 if (xs_bind6(transport, sock) < 0) { 1752 sock_release(sock); 1753 goto out; 1754 } 1755 1756 dprintk("RPC: worker connecting xprt %p via %s to " 1757 "%s (port %s)\n", xprt, 1758 xprt->address_strings[RPC_DISPLAY_PROTO], 1759 xprt->address_strings[RPC_DISPLAY_ADDR], 1760 xprt->address_strings[RPC_DISPLAY_PORT]); 1761 1762 xs_udp_finish_connecting(xprt, sock); 1763 status = 0; 1764 out: 1765 xprt_clear_connecting(xprt); 1766 xprt_wake_pending_tasks(xprt, status); 1767 } 1768 1769 /* 1770 * We need to preserve the port number so the reply cache on the server can 1771 * find our cached RPC replies when we get around to reconnecting. 1772 */ 1773 static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport) 1774 { 1775 int result; 1776 struct sockaddr any; 1777 1778 dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); 1779 1780 /* 1781 * Disconnect the transport socket by doing a connect operation 1782 * with AF_UNSPEC. This should return immediately... 1783 */ 1784 memset(&any, 0, sizeof(any)); 1785 any.sa_family = AF_UNSPEC; 1786 result = kernel_connect(transport->sock, &any, sizeof(any), 0); 1787 if (!result) 1788 xs_sock_mark_closed(xprt); 1789 else 1790 dprintk("RPC: AF_UNSPEC connect return code %d\n", 1791 result); 1792 } 1793 1794 static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport) 1795 { 1796 unsigned int state = transport->inet->sk_state; 1797 1798 if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) 1799 return; 1800 if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) 1801 return; 1802 xs_abort_connection(xprt, transport); 1803 } 1804 1805 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1806 { 1807 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1808 1809 if (!transport->inet) { 1810 struct sock *sk = sock->sk; 1811 1812 write_lock_bh(&sk->sk_callback_lock); 1813 1814 xs_save_old_callbacks(transport, sk); 1815 1816 sk->sk_user_data = xprt; 1817 sk->sk_data_ready = xs_tcp_data_ready; 1818 sk->sk_state_change = xs_tcp_state_change; 1819 sk->sk_write_space = xs_tcp_write_space; 1820 sk->sk_error_report = xs_error_report; 1821 sk->sk_allocation = GFP_ATOMIC; 1822 1823 /* socket options */ 1824 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; 1825 sock_reset_flag(sk, SOCK_LINGER); 1826 tcp_sk(sk)->linger2 = 0; 1827 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1828 1829 xprt_clear_connected(xprt); 1830 1831 /* Reset to new socket */ 1832 transport->sock = sock; 1833 transport->inet = sk; 1834 1835 write_unlock_bh(&sk->sk_callback_lock); 1836 } 1837 1838 if (!xprt_bound(xprt)) 1839 return -ENOTCONN; 1840 1841 /* Tell the socket layer to start connecting... 
	xprt->stat.connect_count++;
	xprt->stat.connect_start = jiffies;
	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
}

/**
 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @xprt: RPC transport to connect
 * @transport: socket transport to connect
 * @create_sock: function to create a socket of the correct type
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
		struct sock_xprt *transport,
		struct socket *(*create_sock)(struct rpc_xprt *,
					      struct sock_xprt *))
{
	struct socket *sock = transport->sock;
	int status = -EIO;

	if (xprt->shutdown)
		goto out;

	if (!sock) {
		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
		sock = create_sock(xprt, transport);
		if (IS_ERR(sock)) {
			status = PTR_ERR(sock);
			goto out;
		}
	} else {
		int abort_and_exit;

		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
				&xprt->state);
		/* "close" the socket, preserving the local port */
		xs_tcp_reuse_connection(xprt, transport);

		if (abort_and_exit)
			goto out_eagain;
	}

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	status = xs_tcp_finish_connecting(xprt, sock);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt),
			sock->sk->sk_state);
	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
			__func__, status);
	case -EADDRNOTAVAIL:
		/* We're probably in TIME_WAIT.  Get rid of existing socket,
		 * and retry
		 */
		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
		xprt_force_disconnect(xprt);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		/* retry with existing socket, after a delay */
	case 0:
	case -EINPROGRESS:
	case -EALREADY:
		xprt_clear_connecting(xprt);
		return;
	}
out_eagain:
	status = -EAGAIN;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}

static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
		struct sock_xprt *transport)
{
	struct socket *sock;
	int err;

	/* start from scratch */
	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n",
				-err);
		goto out_err;
	}
	xs_reclassify_socket4(sock);

	if (xs_bind4(transport, sock) < 0) {
		sock_release(sock);
		goto out_err;
	}
	return sock;
out_err:
	return ERR_PTR(-EIO);
}

/**
 * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
 * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */

static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
		struct sock_xprt *transport)
{
	struct socket *sock;
	int err;

	/* start from scratch */
	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n",
				-err);
		goto out_err;
	}
	xs_reclassify_socket4(sock);

	if (xs_bind4(transport, sock) < 0) {
		sock_release(sock);
		goto out_err;
	}
	return sock;
out_err:
	return ERR_PTR(-EIO);
}

/**
 * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
 * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker4(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;

	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
}

static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
		struct sock_xprt *transport)
{
	struct socket *sock;
	int err;

	/* start from scratch */
	err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n",
				-err);
		goto out_err;
	}
	xs_reclassify_socket6(sock);

	if (xs_bind6(transport, sock) < 0) {
		sock_release(sock);
		goto out_err;
	}
	return sock;
out_err:
	return ERR_PTR(-EIO);
}

/**
 * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
 * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker6(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;

	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu "
				"seconds\n",
				xprt, xprt->reestablish_timeout / HZ);
		queue_delayed_work(rpciod_workqueue,
				&transport->connect_worker,
				xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
	} else {
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
		queue_delayed_work(rpciod_workqueue,
				&transport->connect_worker, 0);
	}
}

static void xs_tcp_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* Exit if we need to wait for socket shutdown to complete */
	if (test_bit(XPRT_CLOSING, &xprt->state))
		return;
	xs_connect(task);
}
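
/*
 * Worked example of the backoff in xs_connect() (informational): with
 * XS_TCP_INIT_REEST_TO = 3 * HZ and XS_TCP_MAX_REEST_TO = 5 * 60 * HZ,
 * successive reconnect attempts against an unresponsive server are
 * scheduled after roughly
 *
 *	3s, 6s, 12s, 24s, 48s, 96s, 192s, 300s, 300s, ...
 *
 * since xs_connect() doubles reestablish_timeout after scheduling each
 * attempt and clamps the result to XS_TCP_MAX_REEST_TO.
 */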

/**
 * xs_udp_print_stats - display UDP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u);
}

/**
 * xs_tcp_print_stats - display TCP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time,
			idle_time,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u);
}
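
/*
 * Example of the TCP line as rendered above, e.g. in
 * /proc/self/mountstats for an NFS mount (the values are made up and
 * shown only to name the fields in order):
 *
 *	xprt:	tcp 783 0 1 23 16 36 36 0 36 0
 *
 * i.e. source port, bind_count, connect_count, connect_time, idle_time,
 * sends, recvs, bad_xids, req_u, bklog_u.
 */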

/*
 * Allocate a bunch of pages for a scratch buffer for the rpc code. The
 * reason we allocate pages instead of doing a kmalloc like rpc_malloc is
 * because we want to use the server side send routines.
 */
void *bc_malloc(struct rpc_task *task, size_t size)
{
	struct page *page;
	struct rpc_buffer *buf;

	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
	page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;

	buf = page_address(page);
	buf->len = PAGE_SIZE;

	return buf->data;
}

/*
 * Free the space allocated in the bc_malloc routine
 */
void bc_free(void *buffer)
{
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	free_page((unsigned long)buf);
}
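
/*
 * Page layout used by bc_malloc()/bc_free() (informational sketch;
 * struct rpc_buffer is declared in sunrpc.h):
 *
 *	+-------------------+---------------------------------------+
 *	| struct rpc_buffer | buf->data                             |
 *	| (buf->len holds   | <-- pointer handed back to the caller |
 *	|  PAGE_SIZE)       |                                       |
 *	+-------------------+---------------------------------------+
 *	|<----------------------- one page ------------------------>|
 *
 * bc_free() recovers the start of the page from the data pointer with
 * container_of(), which is why callers must pass back exactly the
 * pointer they got from bc_malloc().
 */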

/*
 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
 */
static int bc_sendto(struct rpc_rqst *req)
{
	int len;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	unsigned long headoff;
	unsigned long tailoff;

	/*
	 * Set up the rpc header and record marker stuff
	 */
	xs_encode_tcp_record_marker(xbufp);

	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
	len = svc_send_common(sock, xbufp,
			      virt_to_page(xbufp->head[0].iov_base), headoff,
			      xbufp->tail[0].iov_base, tailoff);

	if (len != xbufp->len) {
		printk(KERN_NOTICE "Error sending entire callback!\n");
		len = -EAGAIN;
	}

	return len;
}

/*
 * The send routine. Borrows from svc_send
 */
static int bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct svc_xprt *xprt;
	struct svc_sock *svsk;
	int len;

	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
	/*
	 * Get the server socket associated with this callback xprt
	 */
	xprt = req->rq_xprt->bc_xprt;
	svsk = container_of(xprt, struct svc_sock, sk_xprt);

	/*
	 * Grab the mutex to serialize data as the connection is shared
	 * with the fore channel
	 */
	if (!mutex_trylock(&xprt->xpt_mutex)) {
		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&xprt->xpt_mutex))
			return -EAGAIN;
		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
	}
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = bc_sendto(req);
	mutex_unlock(&xprt->xpt_mutex);

	if (len > 0)
		len = 0;

	return len;
}
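
/*
 * Note on the locking in bc_send_request() (informational): the
 * backchannel shares its connection with the forechannel, so sends are
 * serialized on xpt_mutex. The trylock/sleep/trylock sequence avoids
 * blocking an rpciod thread on the mutex: if the first trylock fails,
 * the task queues itself on xpt_bc_pending; the second trylock catches
 * a release that happened in between (in which case the task dequeues
 * itself again). If both fail, the task returns -EAGAIN while still
 * queued, to be woken when the current holder drops the mutex.
 */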

/*
 * The close routine. Since this connection is client-initiated, we
 * do nothing
 */
static void bc_close(struct rpc_xprt *xprt)
{
	return;
}

/*
 * The xprt destroy routine. Again, because this connection is
 * client-initiated, we do nothing
 */
static void bc_destroy(struct rpc_xprt *xprt)
{
	return;
}

static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_tcp_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
#if defined(CONFIG_NFS_V4_1)
	.release_request	= bc_release_request,
#endif /* CONFIG_NFS_V4_1 */
	.close			= xs_tcp_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
};

/*
 * The rpc_xprt_ops for the server backchannel
 */
static struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
};

static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
				      unsigned int slot_table_size)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL) {
		dprintk("RPC: xs_setup_xprt: couldn't allocate "
				"rpc_xprt\n");
		return ERR_PTR(-ENOMEM);
	}
	xprt = &new->xprt;

	xprt->max_reqs = slot_table_size;
	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL) {
		kfree(xprt);
		dprintk("RPC: xs_setup_xprt: couldn't allocate slot "
				"table\n");
		return ERR_PTR(-ENOMEM);
	}

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	if (args->srcaddr)
		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);

	return xprt;
}

static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_UDP;
	xprt->tsh_size = 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

	xprt->timeout = &xs_udp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_connect_worker4);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_connect_worker6);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;

	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}
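
/*
 * Informational sketch (hypothetical values): callers do not invoke
 * xs_setup_udp() directly; they go through the transport registry, e.g.
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(2049),
 *	};
 *	struct xprt_create args = {
 *		.ident   = XPRT_TRANSPORT_UDP,
 *		.dstaddr = (struct sockaddr *)&sin,
 *		.addrlen = sizeof(sin),
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 * xprt_create_transport() matches args.ident against the xprt_class
 * entries registered in init_socket_xprt() below and calls the
 * corresponding ->setup method.
 */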

static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_connect_worker4);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_connect_worker6);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;

	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}

/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;

	if (!args->bc_xprt)
		return ERR_PTR(-EINVAL);

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->connect_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	/*
	 * The backchannel uses the same socket connection as the
	 * forechannel
	 */
	xprt->bc_xprt = args->bc_xprt;
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	bc_sock->sk_bc_xprt = xprt;
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;
	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}

static struct xprt_class xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};

/**
 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
 *
 */
int init_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif

	xprt_register_transport(&xs_udp_transport);
	xprt_register_transport(&xs_tcp_transport);
	xprt_register_transport(&xs_bc_tcp_transport);

	return 0;
}
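
/*
 * With RPC_DEBUG enabled, the table registered above shows up under
 * /proc/sys/sunrpc. Illustrative shell session (the default shown
 * assumes RPC_DEF_SLOT_TABLE; check your kernel's headers):
 *
 *	# cat /proc/sys/sunrpc/tcp_slot_table_entries
 *	16
 *	# echo 64 > /proc/sys/sunrpc/tcp_slot_table_entries
 *
 * Writes outside [RPC_MIN_SLOT_TABLE, RPC_MAX_SLOT_TABLE] are rejected
 * by proc_dointvec_minmax().
 */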

/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 */
void cleanup_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}

static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
		unsigned int min, unsigned int max)
{
	unsigned long num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = strict_strtoul(val, 0, &num);
	if (ret == -EINVAL || num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static int param_set_portnr(const char *val, struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_RESVPORT,
			RPC_MAX_RESVPORT);
}

static int param_get_portnr(char *buffer, struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}
#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);

static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE);
}

static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}
#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);
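
/*
 * The module parameters above are validated through
 * param_set_uint_minmax(). Illustrative usage (values are examples
 * only):
 *
 *	# modprobe sunrpc min_resvport=650
 *	# echo 32 > /sys/module/sunrpc/parameters/tcp_slot_table_entries
 *
 * Out-of-range values fail with -EINVAL rather than being clamped.
 */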