/*
 *  linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */
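
/*
 * For example, once the table below is registered, each entry shows up
 * as a file named after its .procname:
 *
 *	/proc/sys/sunrpc/udp_slot_table_entries
 *	/proc/sys/sunrpc/tcp_fin_timeout
 *	...
 *
 * and can be tuned at run time, e.g.:
 *
 *	# echo 30 > /proc/sys/sunrpc/tcp_fin_timeout
 */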

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
	{
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_max_slot_table_entries",
		.data		= &xprt_max_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit
	},
	{
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },
};

#endif

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
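
/*
 * Sketch of the resulting backoff (assuming the reconnect logic doubles
 * the timeout on each failed attempt, as the comment above implies):
 * 3s, 6s, 12s, 24s, ..., capped at 300s.
 */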

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

struct sock_xprt {
	struct rpc_xprt		xprt;

	/*
	 * Network layer
	 */
	struct socket *		sock;
	struct sock *		inet;

	/*
	 * State of TCP reply receive
	 */
	__be32			tcp_fraghdr,
				tcp_xid,
				tcp_calldir;

	u32			tcp_offset,
				tcp_reclen;

	unsigned long		tcp_copied,
				tcp_flags;

	/*
	 * Connection of transports
	 */
	struct delayed_work	connect_worker;
	struct sockaddr_storage	srcaddr;
	unsigned short		srcport;

	/*
	 * UDP socket buffer size parameters
	 */
	size_t			rcvsize,
				sndsize;

	/*
	 * Saved socket callback addresses
	 */
	void			(*old_data_ready)(struct sock *);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
	void			(*old_error_report)(struct sock *);
};

/*
 * TCP receive state flags
 */
#define TCP_RCV_LAST_FRAG	(1UL << 0)
#define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
#define TCP_RCV_COPY_XID	(1UL << 2)
#define TCP_RCV_COPY_DATA	(1UL << 3)
#define TCP_RCV_READ_CALLDIR	(1UL << 4)
#define TCP_RCV_COPY_CALLDIR	(1UL << 5)

/*
 * TCP RPC flags
 */
#define TCP_RPC_REPLY		(1UL << 6)

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}
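
/*
 * For a peer at 192.168.1.1, say, the helper below would yield
 * (illustrative values):
 *
 *	RPC_DISPLAY_ADDR:	"192.168.1.1"
 *	RPC_DISPLAY_HEX_ADDR:	"c0a80101"
 *
 * AF_LOCAL transports reuse the socket path for both forms.
 */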

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		sun = xs_addr_un(xprt);
		strlcpy(buf, sun->sun_path, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
{
	ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
			int offset, size_t size, int flags);
	struct page **ppage;
	unsigned int remainder;
	int err;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	do_sendpage = sock->ops->sendpage;
	if (!zerocopy)
		do_sendpage = sock_no_sendpage;
	for(;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = do_sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		*sent_p += err;
		ppage++;
		base = 0;
	}
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}
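
/*
 * An xdr_buf describes a request in up to three pieces:
 *
 *	head[0]	- RPC header (plus the record marker on stream transports)
 *	pages	- optional bulk payload
 *	tail[0]	- optional trailing XDR data/padding
 *
 * xs_sendpages() below walks these in order, using @base to resume in
 * the middle of the buffer after a partial send.
 */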

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 * @zerocopy: true if it is safe to use sendpage()
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
{
	unsigned int remainder = xdr->len - base;
	int err = 0;
	int sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
		*sent_p += sent;
		if (remainder == 0 || sent != len)
			goto out;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return 0;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}

static void xs_nospace_callback(struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);

	transport->inet->sk_write_pending--;
	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
			/*
			 * Notify TCP that we're limited by the application
			 * window size
			 */
			set_bit(SOCK_NOSPACE, &transport->sock->flags);
			sk->sk_write_pending++;
			/* ...and wait for more buffer space */
			xprt_wait_for_buffer_space(task, xs_nospace_callback);
		}
	} else {
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
		ret = -ENOTCONN;
	}

	spin_unlock_bh(&xprt->transport_lock);

	/* Race breaker in case memory is freed before above code is called */
	sk->sk_write_space(sk);
	return ret;
}
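
/*
 * Stream transports prepend a 4-byte record marker to each RPC message
 * (RFC 5531):
 *
 *	 31 30                           0
 *	+---+------------------------------+
 *	| L |       fragment length        |
 *	+---+------------------------------+
 *
 * The L bit (RPC_LAST_STREAM_FRAGMENT) marks the final fragment of a
 * record; the helper below always emits a single fragment, so it sets
 * the bit and the full message length together.
 */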

/*
 * Construct a stream transport record marker in @buf.
 */
static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @task: RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	int sent = 0;

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
			      true, &sent);
	dprintk("RPC: %s(%u) = %d\n",
			__func__, xdr->len - req->rq_bytes_sent, status);
	if (likely(sent > 0) || status == 0) {
		req->rq_bytes_sent += sent;
		req->rq_xmit_bytes_sent += sent;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOBUFS:
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	case -EPIPE:
		xs_close(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int sent = 0;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;
	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
			      xdr, req->rq_bytes_sent, true, &sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
	if (status == -EPERM)
		goto process_status;

	if (sent > 0 || status == 0) {
		req->rq_xmit_bytes_sent += sent;
		if (sent >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

process_status:
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	case -ENETUNREACH:
	case -ENOBUFS:
	case -EPIPE:
	case -ECONNREFUSED:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}

	return status;
}

/**
 * xs_tcp_shutdown - gracefully shut down a TCP socket
 * @xprt: transport
 *
 * Initiates a graceful shutdown of the TCP socket by calling the
 * equivalent of shutdown(SHUT_WR);
 */
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;

	if (sock != NULL) {
		kernel_sock_shutdown(sock, SHUT_WR);
		trace_rpc_socket_shutdown(xprt, sock);
	}
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	bool zerocopy = true;
	int status;
	int sent;

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);
	/* Don't use zero copy if this is a resend. If the RPC call
	 * completes while the socket holds a reference to the pages,
	 * then we may end up resending corrupted data.
	 */
	if (task->tk_flags & RPC_TASK_SENT)
		zerocopy = false;

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		sent = 0;
		status = xs_sendpages(transport->sock, NULL, 0, xdr,
				      req->rq_bytes_sent, zerocopy, &sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(sent == 0 && status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += sent;
		req->rq_xmit_bytes_sent += sent;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		if (sent != 0)
			continue;
		status = -EAGAIN;
		break;
	}

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -ENOBUFS:
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	case -ECONNRESET:
		xs_tcp_shutdown(xprt);
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EPIPE:
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}

	return status;
}

/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	if (req == NULL)
		goto out_release;
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be a rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
	struct rpc_xprt *xprt;
	int err;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	err = -sk->sk_err;
	if (err == 0)
		goto out;
	dprintk("RPC: xs_error_report client %p, error=%d...\n",
			xprt, -err);
	trace_rpc_socket_error(xprt, sk->sk_socket, err);
	if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
		goto out;
	xprt_wake_pending_tasks(xprt, err);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

	if (sk == NULL)
		return;

	transport->srcport = 0;

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	write_unlock_bh(&sk->sk_callback_lock);

	trace_rpc_socket_close(&transport->xprt, sock);
	sock_release(sock);
}
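
/*
 * Note the ordering above: xs_reset_transport() clears sk_user_data and
 * restores the old callbacks under sk_callback_lock *before* calling
 * sock_release(), so a data_ready or state_change callback racing with
 * teardown can never look up a stale rpc_xprt through the socket.
 */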
896 */ 897 static void xs_close(struct rpc_xprt *xprt) 898 { 899 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 900 901 dprintk("RPC: xs_close xprt %p\n", xprt); 902 903 cancel_delayed_work_sync(&transport->connect_worker); 904 905 xs_reset_transport(transport); 906 xprt->reestablish_timeout = 0; 907 908 smp_mb__before_atomic(); 909 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 910 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 911 clear_bit(XPRT_CLOSING, &xprt->state); 912 smp_mb__after_atomic(); 913 xprt_disconnect_done(xprt); 914 } 915 916 static void xs_tcp_close(struct rpc_xprt *xprt) 917 { 918 if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state)) 919 xs_close(xprt); 920 else 921 xs_tcp_shutdown(xprt); 922 } 923 924 static void xs_xprt_free(struct rpc_xprt *xprt) 925 { 926 xs_free_peer_addresses(xprt); 927 xprt_free(xprt); 928 } 929 930 /** 931 * xs_destroy - prepare to shutdown a transport 932 * @xprt: doomed transport 933 * 934 */ 935 static void xs_destroy(struct rpc_xprt *xprt) 936 { 937 dprintk("RPC: xs_destroy xprt %p\n", xprt); 938 939 xs_close(xprt); 940 xs_xprt_free(xprt); 941 module_put(THIS_MODULE); 942 } 943 944 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) 945 { 946 struct xdr_skb_reader desc = { 947 .skb = skb, 948 .offset = sizeof(rpc_fraghdr), 949 .count = skb->len - sizeof(rpc_fraghdr), 950 }; 951 952 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) 953 return -1; 954 if (desc.count) 955 return -1; 956 return 0; 957 } 958 959 /** 960 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets 961 * @sk: socket with data to read 962 * @len: how much data to read 963 * 964 * Currently this assumes we can read the whole reply in a single gulp. 965 */ 966 static void xs_local_data_ready(struct sock *sk) 967 { 968 struct rpc_task *task; 969 struct rpc_xprt *xprt; 970 struct rpc_rqst *rovr; 971 struct sk_buff *skb; 972 int err, repsize, copied; 973 u32 _xid; 974 __be32 *xp; 975 976 read_lock_bh(&sk->sk_callback_lock); 977 dprintk("RPC: %s...\n", __func__); 978 xprt = xprt_from_sock(sk); 979 if (xprt == NULL) 980 goto out; 981 982 skb = skb_recv_datagram(sk, 0, 1, &err); 983 if (skb == NULL) 984 goto out; 985 986 repsize = skb->len - sizeof(rpc_fraghdr); 987 if (repsize < 4) { 988 dprintk("RPC: impossible RPC reply size %d\n", repsize); 989 goto dropit; 990 } 991 992 /* Copy the XID from the skb... 

/**
 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
 * @sk: socket with data to read
 *
 * Currently this assumes we can read the whole reply in a single gulp.
 */
static void xs_local_data_ready(struct sock *sk)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid;
	__be32 *xp;

	read_lock_bh(&sk->sk_callback_lock);
	dprintk("RPC: %s...\n", __func__);
	xprt = xprt_from_sock(sk);
	if (xprt == NULL)
		goto out;

	skb = skb_recv_datagram(sk, 0, 1, &err);
	if (skb == NULL)
		goto out;

	repsize = skb->len - sizeof(rpc_fraghdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	copied = rovr->rq_private_buf.buflen;
	if (copied > repsize)
		copied = repsize;

	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		dprintk("RPC: sk_buff copy failed\n");
		goto out_unlock;
	}

	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid;
	__be32 *xp;

	read_lock_bh(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
		goto out_unlock;
	}

	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);

	xprt_adjust_cwnd(xprt, task, copied);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	xprt_force_disconnect(xprt);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 8)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xs_tcp_force_close(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
	transport->tcp_copied = 4;
	dprintk("RPC: reading %s XID %08x\n",
		(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
							: "request with",
		ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}
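
/*
 * Within each record fragment the receive helpers above and below parse
 * (offsets relative to the fragment payload, after the 4-byte marker):
 *
 *	bytes 0-3: XID
 *	bytes 4-7: message type (RPC_CALL or RPC_REPLY)
 *
 * which is why xs_tcp_read_calldir() expects tcp_offset to be 8 once
 * the call direction has been read.
 */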

static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
				       struct xdr_skb_reader *desc)
{
	size_t len, used;
	u32 offset;
	char *p;

	/*
	 * We want transport->tcp_offset to be 8 at the end of this routine
	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
	 * When this function is called for the first time,
	 * transport->tcp_offset is 4 (after having already read the xid).
	 */
	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
	len = sizeof(transport->tcp_calldir) - offset;
	dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_calldir) + offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
	/*
	 * We don't yet have the XDR buffer, so we will write the calldir
	 * out after we get the buffer from the 'struct rpc_rqst'
	 */
	switch (ntohl(transport->tcp_calldir)) {
	case RPC_REPLY:
		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
		transport->tcp_flags |= TCP_RCV_COPY_DATA;
		transport->tcp_flags |= TCP_RPC_REPLY;
		break;
	case RPC_CALL:
		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
		transport->tcp_flags |= TCP_RCV_COPY_DATA;
		transport->tcp_flags &= ~TCP_RPC_REPLY;
		break;
	default:
		dprintk("RPC: invalid request message type\n");
		xs_tcp_force_close(&transport->xprt);
	}
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
				     struct xdr_skb_reader *desc,
				     struct rpc_rqst *req)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	rcvbuf = &req->rq_private_buf;

	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
		/*
		 * Save the RPC direction in the XDR buffer
		 */
		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
			&transport->tcp_calldir,
			sizeof(transport->tcp_calldir));
		transport->tcp_copied += sizeof(transport->tcp_calldir);
		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
	}

	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  &my_desc, xdr_skb_read_bits);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		return;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}

/*
 * Finds the request corresponding to the RPC xid and invokes the common
 * tcp read code to read the data.
 */
static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
	if (!req) {
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(transport->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return -1;
	}

	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_rqst(req->rq_task, transport->tcp_copied);

	spin_unlock(&xprt->transport_lock);
	return 0;
}
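
/*
 * A note on the backchannel (a sketch, based on how these helpers are
 * used): NFSv4.1 sessions let the server send RPC *calls* back over the
 * client's existing TCP connection. When CONFIG_SUNRPC_BACKCHANNEL is
 * enabled, the call direction parsed earlier routes each record either
 * to a normal reply slot or to a preallocated backchannel request.
 */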

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Obtains an rpc_rqst previously allocated and invokes the common
 * tcp read code to read the data.  The result is placed in the callback
 * queue.
 * If we're unable to obtain the rpc_rqst we schedule the closing of the
 * connection and return -1.
 */
static int xs_tcp_read_callback(struct rpc_xprt *xprt,
				struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
	if (req == NULL) {
		spin_unlock(&xprt->transport_lock);
		printk(KERN_WARNING "Callback slot table overflowed\n");
		xprt_force_disconnect(xprt);
		return -1;
	}

	dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_bc_request(req, transport->tcp_copied);
	spin_unlock(&xprt->transport_lock);

	return 0;
}

static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
					struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	return (transport->tcp_flags & TCP_RPC_REPLY) ?
		xs_tcp_read_reply(xprt, desc) :
		xs_tcp_read_callback(xprt, desc);
}
#else
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
					struct xdr_skb_reader *desc)
{
	return xs_tcp_read_reply(xprt, desc);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Read data off the transport.  This can be either an RPC_CALL or an
 * RPC_REPLY.  Relay the processing to helper functions.
 */
static void xs_tcp_read_data(struct rpc_xprt *xprt,
			     struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	if (_xs_tcp_read_data(xprt, desc) == 0)
		xs_tcp_check_fraghdr(transport);
	else {
		/*
		 * The transport_lock protects the request handling.
		 * There's no need to hold it to update the tcp_flags.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}

static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len;

	len = transport->tcp_reclen - transport->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	transport->tcp_offset += len;
	dprintk("RPC: discarded %Zu bytes\n", len);
	xs_tcp_check_fraghdr(transport);
}
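
/*
 * Rough sketch of the receive state machine driven by the loop below
 * (one pass per flag, in the order tested):
 *
 *	COPY_FRAGHDR -> COPY_XID -> READ_CALLDIR -> COPY_DATA -> discard
 *
 * Each helper consumes what it can from the current skb and sets or
 * clears tcp_flags to select the next state; xs_tcp_check_fraghdr()
 * rewinds to COPY_FRAGHDR at each fragment boundary.
 */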

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
			xs_tcp_read_xid(transport, &desc);
			continue;
		}
		/* Read in the call/reply flag */
		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
			xs_tcp_read_calldir(transport, &desc);
			continue;
		}
		/* Read in the request data */
		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
			xs_tcp_read_data(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;
	int read;

	dprintk("RPC: xs_tcp_data_ready...\n");

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	/* Any data means we had a useful conversation, so
	 * we don't need to delay the next reconnect
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	do {
		rd_desc.count = 65536;
		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
	} while (read > 0);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

/*
 * Do the equivalent of linger/linger2 handling for dealing with
 * broken servers that don't close the socket in a timely
 * fashion
 */
static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
		unsigned long timeout)
{
	struct sock_xprt *transport;

	if (xprt_test_and_set_connecting(xprt))
		return;
	set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	transport = container_of(xprt, struct sock_xprt, xprt);
	queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
			   timeout);
}

static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport;

	transport = container_of(xprt, struct sock_xprt, xprt);

	if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
	    !cancel_delayed_work(&transport->connect_worker))
		return;
	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	xprt_clear_connecting(xprt);
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_atomic();
}

static void xs_sock_mark_closed(struct rpc_xprt *xprt)
{
	xs_sock_reset_connection_flags(xprt);
	/* Mark transport as closed and wake up all pending tasks */
	xprt_disconnect_done(xprt);
}
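
/*
 * Rough map of how xs_tcp_state_change() below reacts to socket states
 * (a summary of the cases that follow, not an exhaustive TCP diagram):
 *
 *	TCP_ESTABLISHED	-> mark connected, reset the receive state machine
 *	TCP_FIN_WAIT1	-> client-initiated close, schedule linger timeout
 *	TCP_CLOSE_WAIT	-> server-initiated close, force a disconnect
 *	TCP_LAST_ACK	-> wait for the final ACK, schedule linger timeout
 *	TCP_CLOSE	-> teardown complete, mark the transport closed
 */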

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	trace_rpc_socket_state_change(xprt, sk->sk_socket);
	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			struct sock_xprt *transport = container_of(xprt,
					struct sock_xprt, xprt);

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
			xprt->connect_cookie++;

			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
		spin_unlock(&xprt->transport_lock);
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_atomic();
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt->connect_cookie++;
		clear_bit(XPRT_CONNECTED, &xprt->state);
		xs_tcp_force_close(xprt);
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE:
		xs_tcp_cancel_linger_timeout(xprt);
		xs_sock_mark_closed(xprt);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_write_space(struct sock *sk)
{
	struct socket *sock;
	struct rpc_xprt *xprt;

	if (unlikely(!(sock = sk->sk_socket)))
		return;
	clear_bit(SOCK_NOSPACE, &sock->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
		return;

	xprt_write_space(xprt);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_is_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}
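
/*
 * Worked example (hypothetical numbers): a caller requesting a 32768
 * byte receive buffer on a transport with 16 request slots gets
 *
 *	rcvsize   = 32768 + 1024 = 33792	(slack added above)
 *	sk_rcvbuf = 33792 * 16 * 2 = 1081344	(~1 MB)
 *
 * i.e. room for every slot to have a full-sized reply queued, doubled.
 */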

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
}

static unsigned short xs_get_random_port(void)
{
	unsigned short range = xprt_max_resvport - xprt_min_resvport;
	unsigned short rand = (unsigned short) prandom_u32() % range;
	return rand + xprt_min_resvport;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}

static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
	unsigned short port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
	if (transport->srcport != 0)
		transport->srcport = 0;
	if (!transport->xprt.resvport)
		return 0;
	if (port <= xprt_min_resvport || port > xprt_max_resvport)
		return xprt_max_resvport;
	return --port;
}

static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport);
	unsigned short last;

	/*
	 * If we are asking for any ephemeral port (i.e. port == 0 &&
	 * transport->xprt.resvport == 0), don't bind.  Let the local
	 * port selection happen implicitly when the socket is used
	 * (for example at connect time).
	 *
	 * This ensures that we can continue to establish TCP
	 * connections even when all local ephemeral ports are already
	 * a part of some TCP connection.  This makes no difference
	 * for UDP sockets, but also doesn't harm them.
	 *
	 * If we're asking for any reserved port (i.e. port == 0 &&
	 * transport->xprt.resvport == 1) xs_get_srcport above will
	 * ensure that port is non-zero and we will bind as needed.
	 */
	if (port == 0)
		return 0;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ? "failed" : "ok", err);
	return err;
}
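
/*
 * Source-port selection, roughly: xs_get_srcport() picks a random port
 * in [xprt_min_resvport, xprt_max_resvport] when a reserved port is
 * required (or 0 for "don't care"); on EADDRINUSE, xs_next_srcport()
 * walks downwards and wraps back to xprt_max_resvport, and the loop in
 * xs_bind() gives up once the search has wrapped twice (nloop == 2).
 */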
"failed" : "ok", err); 1801 return err; 1802 } 1803 1804 /* 1805 * We don't support autobind on AF_LOCAL sockets 1806 */ 1807 static void xs_local_rpcbind(struct rpc_task *task) 1808 { 1809 rcu_read_lock(); 1810 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt)); 1811 rcu_read_unlock(); 1812 } 1813 1814 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) 1815 { 1816 } 1817 1818 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1819 static struct lock_class_key xs_key[2]; 1820 static struct lock_class_key xs_slock_key[2]; 1821 1822 static inline void xs_reclassify_socketu(struct socket *sock) 1823 { 1824 struct sock *sk = sock->sk; 1825 1826 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1827 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); 1828 } 1829 1830 static inline void xs_reclassify_socket4(struct socket *sock) 1831 { 1832 struct sock *sk = sock->sk; 1833 1834 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1835 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1836 } 1837 1838 static inline void xs_reclassify_socket6(struct socket *sock) 1839 { 1840 struct sock *sk = sock->sk; 1841 1842 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1843 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1844 } 1845 1846 static inline void xs_reclassify_socket(int family, struct socket *sock) 1847 { 1848 WARN_ON_ONCE(sock_owned_by_user(sock->sk)); 1849 if (sock_owned_by_user(sock->sk)) 1850 return; 1851 1852 switch (family) { 1853 case AF_LOCAL: 1854 xs_reclassify_socketu(sock); 1855 break; 1856 case AF_INET: 1857 xs_reclassify_socket4(sock); 1858 break; 1859 case AF_INET6: 1860 xs_reclassify_socket6(sock); 1861 break; 1862 } 1863 } 1864 #else 1865 static inline void xs_reclassify_socketu(struct socket *sock) 1866 { 1867 } 1868 1869 static inline void xs_reclassify_socket4(struct socket *sock) 1870 { 1871 } 1872 1873 static inline void xs_reclassify_socket6(struct socket *sock) 1874 { 1875 } 1876 1877 static inline void xs_reclassify_socket(int family, struct socket *sock) 1878 { 1879 } 1880 #endif 1881 1882 static void xs_dummy_setup_socket(struct work_struct *work) 1883 { 1884 } 1885 1886 static struct socket *xs_create_sock(struct rpc_xprt *xprt, 1887 struct sock_xprt *transport, int family, int type, int protocol) 1888 { 1889 struct socket *sock; 1890 int err; 1891 1892 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); 1893 if (err < 0) { 1894 dprintk("RPC: can't create %d transport socket (%d).\n", 1895 protocol, -err); 1896 goto out; 1897 } 1898 xs_reclassify_socket(family, sock); 1899 1900 err = xs_bind(transport, sock); 1901 if (err) { 1902 sock_release(sock); 1903 goto out; 1904 } 1905 1906 return sock; 1907 out: 1908 return ERR_PTR(err); 1909 } 1910 1911 static int xs_local_finish_connecting(struct rpc_xprt *xprt, 1912 struct socket *sock) 1913 { 1914 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 1915 xprt); 1916 1917 if (!transport->inet) { 1918 struct sock *sk = sock->sk; 1919 1920 write_lock_bh(&sk->sk_callback_lock); 1921 1922 xs_save_old_callbacks(transport, sk); 1923 1924 sk->sk_user_data = xprt; 1925 sk->sk_data_ready = xs_local_data_ready; 1926 sk->sk_write_space = xs_udp_write_space; 1927 sk->sk_error_report = xs_error_report; 1928 sk->sk_allocation = GFP_ATOMIC; 1929 1930 xprt_clear_connected(xprt); 1931 1932 /* Reset to new socket */ 1933 transport->sock = sock; 1934 transport->inet = sk; 1935 1936 write_unlock_bh(&sk->sk_callback_lock); 1937 } 1938 1939 /* Tell the socket layer to start 
/**
 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
 * @transport: socket transport to connect
 */
static int xs_local_setup_socket(struct sock_xprt *transport)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock;
	int status = -EIO;

	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	status = __sock_create(xprt->xprt_net, AF_LOCAL,
					SOCK_STREAM, 0, &sock, 1);
	if (status < 0) {
		dprintk("RPC: can't create AF_LOCAL "
			"transport socket (%d).\n", -status);
		goto out;
	}
	xs_reclassify_socketu(sock);

	dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);

	status = xs_local_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, status);
	switch (status) {
	case 0:
		dprintk("RPC: xprt %p connected to %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		xprt_set_connected(xprt);
		/* fall through */
	case -ENOBUFS:
		break;
	case -ENOENT:
		dprintk("RPC: xprt %p: socket %s does not exist\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	case -ECONNREFUSED:
		dprintk("RPC: xprt %p: connection refused for %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	default:
		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
				__func__, -status,
				xprt->address_strings[RPC_DISPLAY_ADDR]);
	}

out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
	return status;
}

static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret;

	if (RPC_IS_ASYNC(task)) {
		/*
		 * We want the AF_LOCAL connect to be resolved in the
		 * filesystem namespace of the process making the rpc
		 * call.  Thus we connect synchronously.
		 *
		 * If we want to support asynchronous AF_LOCAL calls,
		 * we'll need to figure out how to pass a namespace to
		 * connect.
		 */
		rpc_exit(task, -ENOTCONN);
		return;
	}
	ret = xs_local_setup_socket(transport);
	if (ret && !RPC_IS_SOFTCONN(task))
		msleep_interruptible(15000);
}

#ifdef CONFIG_SUNRPC_SWAP
static void xs_set_memalloc(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
			xprt);

	if (xprt->swapper)
		sk_set_memalloc(transport->inet);
}

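/*
 * Illustration (hypothetical caller, not part of this file): a client
 * swapping over NFS would tag the transport via xs_swapper() below before
 * issuing any swap I/O, and untag it again at swapoff time:
 *
 *	if (xs_swapper(xprt, 1))	// tag transport for swap
 *		goto abort_swapon;
 *	...				// swap traffic flows
 *	xs_swapper(xprt, 0);		// swapoff: drop the tag
 *
 * sk_set_memalloc() lets the tagged socket dip into the emergency memory
 * reserves, so writing out swap pages can make progress even when the
 * system is deep in reclaim.
 */
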
/**
 * xs_swapper - Tag this transport as being used for swap.
 * @xprt: transport to tag
 * @enable: enable/disable
 *
 */
int xs_swapper(struct rpc_xprt *xprt, int enable)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
			xprt);
	int err = 0;

	if (enable) {
		xprt->swapper++;
		xs_set_memalloc(xprt);
	} else if (xprt->swapper) {
		xprt->swapper--;
		sk_clear_memalloc(transport->inet);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xs_swapper);
#else
static void xs_set_memalloc(struct rpc_xprt *xprt)
{
}
#endif

static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_allocation = GFP_ATOMIC;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		xs_set_memalloc(xprt);

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_do_set_buffer_size(xprt);
}

static void xs_udp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock = transport->sock;
	int status = -EIO;

	/* Start by resetting any existing state */
	xs_reset_transport(transport);
	sock = xs_create_sock(xprt, transport,
			xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
	if (IS_ERR(sock))
		goto out;

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_udp_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, 0);
	status = 0;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}

/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_abort_connection(struct sock_xprt *transport)
{
	int result;
	struct sockaddr any;

	dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);

	/*
	 * Disconnect the transport socket by doing a connect operation
	 * with AF_UNSPEC.  This should return immediately...
	 */
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
	trace_rpc_socket_reset_connection(&transport->xprt,
			transport->sock, result);
	if (!result)
		xs_sock_reset_connection_flags(&transport->xprt);
	dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
}

static void xs_tcp_reuse_connection(struct sock_xprt *transport)
{
	unsigned int state = transport->inet->sk_state;

	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
		/* we don't need to abort the connection if the socket
		 * hasn't undergone a shutdown
		 */
		if (transport->inet->sk_shutdown == 0)
			return;
		dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
				__func__, transport->inet->sk_shutdown);
	}
	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
		/* we don't need to abort the connection if the socket
		 * hasn't undergone a shutdown
		 */
		if (transport->inet->sk_shutdown == 0)
			return;
		dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
				"sk_shutdown set to %d\n",
				__func__, transport->inet->sk_shutdown);
	}
	xs_abort_connection(transport);
}

static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = -ENOTCONN;

	if (!transport->inet) {
		struct sock *sk = sock->sk;
		unsigned int keepidle = xprt->timeout->to_initval / HZ;
		unsigned int keepcnt = xprt->timeout->to_retries + 1;
		unsigned int opt_on = 1;

		/* TCP Keepalive options */
		kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&opt_on, sizeof(opt_on));
		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
				(char *)&keepidle, sizeof(keepidle));
		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
				(char *)&keepidle, sizeof(keepidle));
		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
				(char *)&keepcnt, sizeof(keepcnt));

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sk->sk_error_report = xs_error_report;
		sk->sk_allocation = GFP_ATOMIC;

		/* socket options */
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->linger2 = 0;
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	if (!xprt_bound(xprt))
		goto out;

	xs_set_memalloc(xprt);

	/* Tell the socket layer to start connecting... */
	xprt->stat.connect_count++;
	xprt->stat.connect_start = jiffies;
	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
	switch (ret) {
	case 0:
	case -EINPROGRESS:
		/* SYN_SENT! */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	}
out:
	return ret;
}

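/*
 * Worked example (illustrative): with the xs_tcp_default_timeout defined
 * later in this file (to_initval = 60 * HZ, to_retries = 2), the code
 * above arms keepalive as
 *
 *	TCP_KEEPIDLE  = 60	seconds idle before the first probe
 *	TCP_KEEPINTVL = 60	seconds between probes (reuses keepidle)
 *	TCP_KEEPCNT   = 3	unanswered probes (to_retries + 1)
 *
 * so a dead peer is declared after roughly 60 + 3 * 60 = 240 seconds,
 * tracking the RPC timeout rather than the system-wide keepalive defaults.
 */
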
/**
 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @work: queued work item embedding the socket transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct socket *sock = transport->sock;
	struct rpc_xprt *xprt = &transport->xprt;
	int status = -EIO;

	if (!sock) {
		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
		sock = xs_create_sock(xprt, transport,
				xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
		if (IS_ERR(sock)) {
			status = PTR_ERR(sock);
			goto out;
		}
	} else {
		int abort_and_exit;

		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
				&xprt->state);
		/* "close" the socket, preserving the local port */
		set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
		xs_tcp_reuse_connection(transport);
		clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);

		if (abort_and_exit)
			goto out_eagain;
	}

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	status = xs_tcp_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, status);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt),
			sock->sk->sk_state);
	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
			__func__, status);
		/* fall through */
	case -EADDRNOTAVAIL:
		/* We're probably in TIME_WAIT. Get rid of existing socket,
		 * and retry
		 */
		xs_tcp_force_close(xprt);
		break;
	case 0:
	case -EINPROGRESS:
	case -EALREADY:
		xprt_clear_connecting(xprt);
		return;
	case -EINVAL:
		/* Happens, for instance, if the user specified a link
		 * local IPv6 address without a scope-id.
		 */
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
	case -ENOBUFS:
		/* retry with existing socket, after a delay */
		goto out;
	}
out_eagain:
	status = -EAGAIN;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}

2332 */ 2333 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2334 { 2335 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2336 2337 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { 2338 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2339 "seconds\n", 2340 xprt, xprt->reestablish_timeout / HZ); 2341 queue_delayed_work(rpciod_workqueue, 2342 &transport->connect_worker, 2343 xprt->reestablish_timeout); 2344 xprt->reestablish_timeout <<= 1; 2345 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2346 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2347 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 2348 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 2349 } else { 2350 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2351 queue_delayed_work(rpciod_workqueue, 2352 &transport->connect_worker, 0); 2353 } 2354 } 2355 2356 /** 2357 * xs_local_print_stats - display AF_LOCAL socket-specifc stats 2358 * @xprt: rpc_xprt struct containing statistics 2359 * @seq: output file 2360 * 2361 */ 2362 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2363 { 2364 long idle_time = 0; 2365 2366 if (xprt_connected(xprt)) 2367 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2368 2369 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2370 "%llu %llu %lu %llu %llu\n", 2371 xprt->stat.bind_count, 2372 xprt->stat.connect_count, 2373 xprt->stat.connect_time, 2374 idle_time, 2375 xprt->stat.sends, 2376 xprt->stat.recvs, 2377 xprt->stat.bad_xids, 2378 xprt->stat.req_u, 2379 xprt->stat.bklog_u, 2380 xprt->stat.max_slots, 2381 xprt->stat.sending_u, 2382 xprt->stat.pending_u); 2383 } 2384 2385 /** 2386 * xs_udp_print_stats - display UDP socket-specifc stats 2387 * @xprt: rpc_xprt struct containing statistics 2388 * @seq: output file 2389 * 2390 */ 2391 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2392 { 2393 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2394 2395 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu " 2396 "%lu %llu %llu\n", 2397 transport->srcport, 2398 xprt->stat.bind_count, 2399 xprt->stat.sends, 2400 xprt->stat.recvs, 2401 xprt->stat.bad_xids, 2402 xprt->stat.req_u, 2403 xprt->stat.bklog_u, 2404 xprt->stat.max_slots, 2405 xprt->stat.sending_u, 2406 xprt->stat.pending_u); 2407 } 2408 2409 /** 2410 * xs_tcp_print_stats - display TCP socket-specifc stats 2411 * @xprt: rpc_xprt struct containing statistics 2412 * @seq: output file 2413 * 2414 */ 2415 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2416 { 2417 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2418 long idle_time = 0; 2419 2420 if (xprt_connected(xprt)) 2421 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2422 2423 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu " 2424 "%llu %llu %lu %llu %llu\n", 2425 transport->srcport, 2426 xprt->stat.bind_count, 2427 xprt->stat.connect_count, 2428 xprt->stat.connect_time, 2429 idle_time, 2430 xprt->stat.sends, 2431 xprt->stat.recvs, 2432 xprt->stat.bad_xids, 2433 xprt->stat.req_u, 2434 xprt->stat.bklog_u, 2435 xprt->stat.max_slots, 2436 xprt->stat.sending_u, 2437 xprt->stat.pending_u); 2438 } 2439 2440 /* 2441 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason 2442 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2443 * to use the server side send routines. 
2444 */ 2445 static void *bc_malloc(struct rpc_task *task, size_t size) 2446 { 2447 struct page *page; 2448 struct rpc_buffer *buf; 2449 2450 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2451 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) 2452 return NULL; 2453 2454 page = alloc_page(GFP_KERNEL); 2455 if (!page) 2456 return NULL; 2457 2458 buf = page_address(page); 2459 buf->len = PAGE_SIZE; 2460 2461 return buf->data; 2462 } 2463 2464 /* 2465 * Free the space allocated in the bc_alloc routine 2466 */ 2467 static void bc_free(void *buffer) 2468 { 2469 struct rpc_buffer *buf; 2470 2471 if (!buffer) 2472 return; 2473 2474 buf = container_of(buffer, struct rpc_buffer, data); 2475 free_page((unsigned long)buf); 2476 } 2477 2478 /* 2479 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex 2480 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request. 2481 */ 2482 static int bc_sendto(struct rpc_rqst *req) 2483 { 2484 int len; 2485 struct xdr_buf *xbufp = &req->rq_snd_buf; 2486 struct rpc_xprt *xprt = req->rq_xprt; 2487 struct sock_xprt *transport = 2488 container_of(xprt, struct sock_xprt, xprt); 2489 struct socket *sock = transport->sock; 2490 unsigned long headoff; 2491 unsigned long tailoff; 2492 2493 xs_encode_stream_record_marker(xbufp); 2494 2495 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; 2496 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; 2497 len = svc_send_common(sock, xbufp, 2498 virt_to_page(xbufp->head[0].iov_base), headoff, 2499 xbufp->tail[0].iov_base, tailoff); 2500 2501 if (len != xbufp->len) { 2502 printk(KERN_NOTICE "Error sending entire callback!\n"); 2503 len = -EAGAIN; 2504 } 2505 2506 return len; 2507 } 2508 2509 /* 2510 * The send routine. Borrows from svc_send 2511 */ 2512 static int bc_send_request(struct rpc_task *task) 2513 { 2514 struct rpc_rqst *req = task->tk_rqstp; 2515 struct svc_xprt *xprt; 2516 u32 len; 2517 2518 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); 2519 /* 2520 * Get the server socket associated with this callback xprt 2521 */ 2522 xprt = req->rq_xprt->bc_xprt; 2523 2524 /* 2525 * Grab the mutex to serialize data as the connection is shared 2526 * with the fore channel 2527 */ 2528 if (!mutex_trylock(&xprt->xpt_mutex)) { 2529 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); 2530 if (!mutex_trylock(&xprt->xpt_mutex)) 2531 return -EAGAIN; 2532 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); 2533 } 2534 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2535 len = -ENOTCONN; 2536 else 2537 len = bc_sendto(req); 2538 mutex_unlock(&xprt->xpt_mutex); 2539 2540 if (len > 0) 2541 len = 0; 2542 2543 return len; 2544 } 2545 2546 /* 2547 * The close routine. Since this is client initiated, we do nothing 2548 */ 2549 2550 static void bc_close(struct rpc_xprt *xprt) 2551 { 2552 } 2553 2554 /* 2555 * The xprt destroy routine. 
/*
 * The xprt destroy routine.  The connection itself is client initiated,
 * so there is no socket teardown to do here; just release the xprt.
 */
static void bc_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: bc_destroy xprt %p\n", xprt);

	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

static struct rpc_xprt_ops xs_local_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.rpcbind		= xs_local_rpcbind,
	.set_port		= xs_local_set_port,
	.connect		= xs_local_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_local_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_local_print_stats,
};

static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.alloc_slot		= xprt_lock_and_alloc_slot,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_tcp_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
};

/*
 * The rpc_xprt_ops for the server backchannel
 */
static struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
};

static int xs_init_anyaddr(const int family, struct sockaddr *sap)
{
	static const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	static const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
	};

	switch (family) {
	case AF_LOCAL:
		break;
	case AF_INET:
		memcpy(sap, &sin, sizeof(sin));
		break;
	case AF_INET6:
		memcpy(sap, &sin6, sizeof(sin6));
		break;
	default:
		dprintk("RPC: %s: Bad address family\n", __func__);
		return -EAFNOSUPPORT;
	}
	return 0;
}

static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
				      unsigned int slot_table_size,
				      unsigned int max_slot_table_size)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
			max_slot_table_size);
	if (xprt == NULL) {
		dprintk("RPC: xs_setup_xprt: couldn't allocate "
				"rpc_xprt\n");
		return ERR_PTR(-ENOMEM);
	}

	new = container_of(xprt, struct sock_xprt, xprt);
	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	if (args->srcaddr)
		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
	else {
		int err;
		err = xs_init_anyaddr(args->dstaddr->sa_family,
					(struct sockaddr *)&new->srcaddr);
		if (err != 0) {
			xprt_free(xprt);
			return ERR_PTR(err);
		}
	}

	return xprt;
}

static const struct rpc_timeout xs_local_default_timeout = {
	.to_initval = 10 * HZ,
	.to_maxval = 10 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_local - Set up transport to use an AF_LOCAL socket
 * @args: rpc transport creation arguments
 *
 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
 */
static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
{
	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_max_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = 0;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_local_ops;
	xprt->timeout = &xs_local_default_timeout;

	INIT_DELAYED_WORK(&transport->connect_worker,
			xs_dummy_setup_socket);

	switch (sun->sun_family) {
	case AF_LOCAL:
		if (sun->sun_path[0] != '/') {
			dprintk("RPC: bad AF_LOCAL address: %s\n",
					sun->sun_path);
			ret = ERR_PTR(-EINVAL);
			goto out_err;
		}
		xprt_set_bound(xprt);
		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
		ret = ERR_PTR(xs_local_setup_socket(transport));
		if (ret)
			goto out_err;
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
			xprt->address_strings[RPC_DISPLAY_ADDR]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};

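/*
 * Editorial note (illustrative): read as a static schedule, the table
 * above would retransmit after 5s, 10s, 15s, 20s, 25s and 30s (to_initval
 * plus to_increment per retry, capped at to_maxval) across to_retries = 5
 * retransmissions.  In practice xs_udp_ops installs
 * xprt_set_retrans_timeout_rtt, so the live timeout is driven by the RTT
 * estimator and these values serve as its bounds.
 */
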
/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
			xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_UDP;
	xprt->tsh_size = 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

	xprt->timeout = &xs_udp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_setup_socket);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_setup_socket);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;
	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;

	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			max_slot_table_size);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_setup_socket);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_setup_socket);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s (port %s) via %s\n",
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT],
			xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Once we've associated a backchannel xprt with a connection,
	 * we want to keep it around as long as the connection lasts,
	 * in case we need to start using it for a backchannel again;
	 * this reference won't be dropped until bc_xprt is destroyed.
	 */
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;

	args->bc_xprt->xpt_bc_xprt = NULL;
	xprt_put(xprt);
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static struct xprt_class xs_local_transport = {
	.list		= LIST_HEAD_INIT(xs_local_transport.list),
	.name		= "named UNIX socket",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_LOCAL,
	.setup		= xs_setup_local,
};

static struct xprt_class xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};

/**
 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
 *
 */
int init_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif

	xprt_register_transport(&xs_local_transport);
	xprt_register_transport(&xs_udp_transport);
	xprt_register_transport(&xs_tcp_transport);
	xprt_register_transport(&xs_bc_tcp_transport);

	return 0;
}

/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 */
void cleanup_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xs_local_transport);
	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}

static int param_set_uint_minmax(const char *val,
		const struct kernel_param *kp,
		unsigned int min, unsigned int max)
{
	unsigned int num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = kstrtouint(val, 0, &num);
	if (ret == -EINVAL || num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_RESVPORT,
			RPC_MAX_RESVPORT);
}

static struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};

#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);

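/*
 * Usage note (illustrative, not part of the original file): xprtsock is
 * built into the sunrpc module, so these knobs appear as module
 * parameters and can be pinned at load time, e.g.
 *
 *	modprobe sunrpc min_resvport=800 max_resvport=1023
 *
 * or adjusted at runtime via /sys/module/sunrpc/parameters/, subject to
 * the RPC_MIN_RESVPORT..RPC_MAX_RESVPORT bounds enforced by
 * param_set_portnr() above.
 */
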
static int param_set_slot_table_size(const char *val,
				     const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE);
}

static struct kernel_param_ops param_ops_slot_table_size = {
	.set = param_set_slot_table_size,
	.get = param_get_uint,
};

#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

static int param_set_max_slot_table_size(const char *val,
				     const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE_LIMIT);
}

static struct kernel_param_ops param_ops_max_slot_table_size = {
	.set = param_set_max_slot_table_size,
	.get = param_get_uint,
};

#define param_check_max_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
		   max_slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);