// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_xprt_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>

#include <trace/events/sunrpc.h>

#include "socklib.h"
#include "sunrpc.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int flags);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_tcp_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
					  struct net *, struct sockaddr *,
					  int, int);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
					      &svc_slock_key[0],
					      "sk_xprt.xpt_lock-AF_INET-NFSD",
					      &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
					      &svc_slock_key[1],
					      "sk_xprt.xpt_lock-AF_INET6-NFSD",
					      &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static void svc_reclassify_socket(struct socket *sock)
{
}
#endif

/**
 * svc_tcp_release_rqst - Release transport-related resources
 * @rqstp: request structure with resources to be released
 *
 */
static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
{
}

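/*
 * Note: svc_tcp_release_rqst() above is intentionally empty. The TCP
 * receive path copies each record into pages owned by the svc_rqst,
 * so there is no per-request transport context left to release.
 * Contrast svc_udp_release_rqst() below, which must consume the
 * sk_buff that svc_udp_recvfrom() may have parked in rq_xprt_ctxt.
 */
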
/**
 * svc_udp_release_rqst - Release transport-related resources
 * @rqstp: request structure with resources to be released
 *
 */
static void svc_udp_release_rqst(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;
		consume_skb(skb);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	switch (svsk->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);

		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr =
			svc_daddr_in(rqstp)->sin_addr.s_addr;
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
	}
	break;

	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);

		cmh->cmsg_level = SOL_IPV6;
		cmh->cmsg_type = IPV6_PKTINFO;
		pki->ipi6_ifindex = daddr->sin6_scope_id;
		pki->ipi6_addr = daddr->sin6_addr;
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
	}
	break;
	}
}

static int svc_sock_result_payload(struct svc_rqst *rqstp, unsigned int offset,
				   unsigned int length)
{
	return 0;
}

/*
 * Report socket names for nfsdfs
 */
static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
	const struct sock *sk = svsk->sk_sk;
	const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
							"udp" : "tcp";
	int len;

	switch (sk->sk_family) {
	case PF_INET:
		len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
				proto_name,
				&inet_sk(sk)->inet_rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
				proto_name,
				&sk->sk_v6_rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
#endif
	default:
		len = snprintf(buf, remaining, "*unknown-%d*\n",
				sk->sk_family);
	}

	if (len >= remaining) {
		*buf = '\0';
		return -ENAMETOOLONG;
	}
	return len;
}

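/*
 * For illustration, svc_one_sock_name() above emits one record per
 * socket, e.g. (addresses and ports hypothetical):
 *
 *	ipv4 udp 192.0.2.1 2049
 *	ipv6 tcp ::1 2049
 */
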
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size	= size + seek,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
#else
static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size,
				  size_t seek)
{
}
#endif

/*
 * Read from @rqstp's transport socket. The incoming message fills whole
 * pages in @rqstp's rq_pages array until the last page of the message
 * has been received into a partial page.
 */
static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
				size_t seek)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct bio_vec *bvec = rqstp->rq_bvec;
	struct msghdr msg = { NULL };
	unsigned int i;
	ssize_t len;
	size_t t;

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
		bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
	rqstp->rq_respages = &rqstp->rq_pages[i];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
	if (seek) {
		iov_iter_advance(&msg.msg_iter, seek);
		buflen -= seek;
	}
	len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
	if (len > 0)
		svc_flush_bvec(bvec, len, seek);

	/* If we read a full record, then assume there may be more
	 * data to read (stream based sockets only!)
	 */
	if (len == buflen)
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
{
	unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
	struct socket *sock = svsk->sk_sock;

	nreqs = min(nreqs, INT_MAX / 2 / max_mesg);

	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
	sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
	sock->sk->sk_write_space(sock->sk);
	release_sock(sock->sk);
}

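/*
 * Worked example for svc_sock_setbufsize() above, with hypothetical
 * numbers: for sv_max_mesg = 1 MB and nreqs = 11 (8 threads + 3, as
 * svc_udp_recvfrom() computes), each buffer is sized to
 * 11 * 1 MB * 2 = 22 MB: room for one request and one reply per
 * thread, with slack. Clamping nreqs to INT_MAX / 2 / max_mesg first
 * keeps the product from overflowing the int-sized sk_sndbuf and
 * sk_rcvbuf fields.
 */
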
static void svc_sock_secure_port(struct svc_rqst *rqstp)
{
	if (svc_port_is_privileged(svc_addr(rqstp)))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
}

/*
 * INET callback when data has been received on the socket.
 */
static void svc_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		svsk->sk_odata(sk);
		trace_svcsock_data_ready(&svsk->sk_xprt, 0);
		if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
			svc_xprt_enqueue(&svsk->sk_xprt);
	}
}

/*
 * INET callback when space is newly available on the socket.
 */
static void svc_write_space(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		trace_svcsock_write_space(&svsk->sk_xprt, 0);
		svsk->sk_owspace(sk);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
}

static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
		return 1;
	return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
}

static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	sock_no_linger(svsk->sk_sock->sk);
}

/*
 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
 */
static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in_pktinfo *pki = CMSG_DATA(cmh);
	struct sockaddr_in *daddr = svc_daddr_in(rqstp);

	if (cmh->cmsg_type != IP_PKTINFO)
		return 0;

	daddr->sin_family = AF_INET;
	daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
	return 1;
}

/*
 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
 */
static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in6_pktinfo *pki = CMSG_DATA(cmh);
	struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);

	if (cmh->cmsg_type != IPV6_PKTINFO)
		return 0;

	daddr->sin6_family = AF_INET6;
	daddr->sin6_addr = pki->ipi6_addr;
	daddr->sin6_scope_id = pki->ipi6_ifindex;
	return 1;
}

/*
 * Copy the UDP datagram's destination address to the rqstp structure.
 * The 'destination' address in this case is the address to which the
 * peer sent the datagram, i.e. our local address. For multihomed
 * hosts, this can change from msg to msg. Note that only the IP
 * address changes, the port number should remain the same.
 */
static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
				    struct cmsghdr *cmh)
{
	switch (cmh->cmsg_level) {
	case SOL_IP:
		return svc_udp_get_dest_address4(rqstp, cmh);
	case SOL_IPV6:
		return svc_udp_get_dest_address6(rqstp, cmh);
	}

	return 0;
}

/**
 * svc_udp_recvfrom - Receive a datagram from a UDP socket.
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Returns:
 *   On success, the number of bytes in a received RPC Call, or
 *   %0 if a complete RPC Call message was not ready to return
 */
static int svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct sk_buff *skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len;
	int err;

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer. sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread. We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err < 0)
		goto out_recv_err;
	skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err);
	if (!skb)
		goto out_recv_err;

	len = svc_addr_len(svc_addr(rqstp));
	rqstp->rq_addrlen = len;
	if (skb->tstamp == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	sock_write_timestamp(svsk->sk_sk, skb->tstamp);
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */

	len = skb->len;
	rqstp->rq_arg.len = len;
	trace_svcsock_udp_recv(&svsk->sk_xprt, len);

	rqstp->rq_prot = IPPROTO_UDP;

	if (!svc_udp_get_dest_address(rqstp, cmh))
		goto out_cmsg_err;
	rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
			goto out_bh_enable;
		local_bh_enable();
		consume_skb(skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data;
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb))
			goto out_free;
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}
	rqstp->rq_next_page = rqstp->rq_respages+1;

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	svc_xprt_received(rqstp->rq_xprt);
	return len;

out_recv_err:
	if (err != -EAGAIN) {
		/* possibly an icmp error */
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	}
	trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
	goto out_clear_busy;
out_cmsg_err:
	net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
			     cmh->cmsg_level, cmh->cmsg_type);
	goto out_free;
out_bh_enable:
	local_bh_enable();
out_free:
	kfree_skb(skb);
out_clear_busy:
	svc_xprt_received(rqstp->rq_xprt);
	return 0;
}

/**
 * svc_udp_sendto - Send out a reply on a UDP socket
 * @rqstp: completed svc_rqst
 *
 * xpt_mutex ensures @rqstp's whole message is written to the socket
 * without interruption.
 *
 * Returns the number of bytes sent, or a negative errno.
 */
static int svc_udp_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svc_sock	*svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct xdr_buf *xdr = &rqstp->rq_res;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name	= &rqstp->rq_addr,
		.msg_namelen	= rqstp->rq_addrlen,
		.msg_control	= cmh,
		.msg_controllen	= sizeof(buffer),
	};
	unsigned int sent;
	int err;

	svc_udp_release_rqst(rqstp);

	svc_set_cmsg_data(rqstp, cmh);

	mutex_lock(&xprt->xpt_mutex);

	if (svc_xprt_is_dead(xprt))
		goto out_notconn;

	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
	if (err < 0)
		goto out_unlock;

	err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
	if (err == -ECONNREFUSED) {
		/* ICMP error on earlier request. */
		err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
	}
	xdr_free_bvec(xdr);
	trace_svcsock_udp_send(xprt, err);
out_unlock:
	mutex_unlock(&xprt->xpt_mutex);
	if (err < 0)
		return err;
	return sent;

out_notconn:
	mutex_unlock(&xprt->xpt_mutex);
	return -ENOTCONN;
}

static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = xprt->xpt_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required * 2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}

static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
{
}

static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
				       struct net *net,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
}

static const struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_result_payload = svc_sock_result_payload,
	.xpo_release_rqst = svc_udp_release_rqst,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
	.xpo_secure_port = svc_sock_secure_port,
	.xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
	.xcl_ident = XPRT_TRANSPORT_UDP,
};

static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
		      &svsk->sk_xprt, serv);
	clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	svsk->sk_sk->sk_data_ready = svc_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initial setting: must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk, 3);

	/* data might have come in before data_ready set up */
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);

	/* make sure we get destination address info */
	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		ip_sock_set_pktinfo(svsk->sk_sock->sk);
		break;
	case AF_INET6:
		ip6_sock_set_recvpktinfo(svsk->sk_sock->sk);
		break;
	default:
		BUG();
	}
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void svc_tcp_listen_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		svsk->sk_odata(sk);
	}

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
			svc_xprt_enqueue(&svsk->sk_xprt);
		}
	}
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		svsk->sk_ostate(sk);
		trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
		if (sk->sk_state != TCP_ESTABLISHED)
			svc_xprt_deferred_close(&svsk->sk_xprt);
	}
}

/*
 * Accept a TCP connection
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	if (!sock)
		return NULL;

	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN)
			net_warn_ratelimited("%s: accept failed (err %d)!\n",
					     serv->sv_name, -err);
		trace_svcsock_accept_err(xprt, serv->sv_name, err);
		return NULL;
	}
	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);

	err = kernel_getpeername(newsock, sin);
	if (err < 0) {
		trace_svcsock_getpeername_err(xprt, serv->sv_name, err);
		goto failed;		/* aborted connection or whatever */
	}
	slen = err;

	/* Reset the inherited callbacks before calling svc_setup_socket */
	newsock->sk->sk_state_change = svsk->sk_ostate;
	newsock->sk->sk_data_ready = svsk->sk_odata;
	newsock->sk->sk_write_space = svsk->sk_owspace;

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	newsvsk = svc_setup_socket(serv, newsock,
				   (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY));
	if (IS_ERR(newsvsk))
		goto failed;
	svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
	err = kernel_getsockname(newsock, sin);
	slen = err;
	if (unlikely(err < 0))
		slen = offsetof(struct sockaddr, sa_data);
	svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);

	if (sock_is_loopback(newsock->sk))
		set_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	else
		clear_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sock_release(newsock);
	return NULL;
}

static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
				    struct svc_rqst *rqstp)
{
	size_t len = svsk->sk_datalen;
	unsigned int i, npages;

	if (!len)
		return 0;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		if (rqstp->rq_pages[i] != NULL)
			put_page(rqstp->rq_pages[i]);
		BUG_ON(svsk->sk_pages[i] == NULL);
		rqstp->rq_pages[i] = svsk->sk_pages[i];
		svsk->sk_pages[i] = NULL;
	}
	rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
	return len;
}

static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	unsigned int i, len, npages;

	if (svsk->sk_datalen == 0)
		return;
	len = svsk->sk_datalen;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		svsk->sk_pages[i] = rqstp->rq_pages[i];
		rqstp->rq_pages[i] = NULL;
	}
}

static void svc_tcp_clear_pages(struct svc_sock *svsk)
{
	unsigned int i, len, npages;

	if (svsk->sk_datalen == 0)
		goto out;
	len = svsk->sk_datalen;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		if (svsk->sk_pages[i] == NULL) {
			WARN_ON_ONCE(1);
			continue;
		}
		put_page(svsk->sk_pages[i]);
		svsk->sk_pages[i] = NULL;
	}
out:
	svsk->sk_tcplen = 0;
	svsk->sk_datalen = 0;
}

/*
 * Receive fragment record header into sk_marker.
 */
static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
				   struct svc_rqst *rqstp)
{
	ssize_t want, len;

	/* If we haven't gotten the record length yet,
	 * get the next four bytes.
	 */
	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
		struct msghdr	msg = { NULL };
		struct kvec	iov;

		want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
		iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
		iov.iov_len  = want;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
		len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
		if (len < 0)
			return len;
		svsk->sk_tcplen += len;
		if (len < want) {
			/* call again to read the remaining bytes */
			goto err_short;
		}
		trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
		if (svc_sock_reclen(svsk) + svsk->sk_datalen >
		    svsk->sk_xprt.xpt_server->sv_max_mesg)
			goto err_too_large;
	}
	return svc_sock_reclen(svsk);

err_too_large:
	net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
			       __func__, svsk->sk_xprt.xpt_server->sv_name,
			       svc_sock_reclen(svsk));
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_short:
	return -EAGAIN;
}

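/*
 * For reference, a sketch of how the record marker read above is
 * decoded. This mirrors the svc_sock_reclen() and svc_sock_final_rec()
 * helpers declared in include/linux/sunrpc/svcsock.h and is
 * illustrative only (hence not compiled):
 */
#if 0
static u32 example_sock_reclen(const struct svc_sock *svsk)
{
	/* The low 31 bits of the marker carry the fragment length. */
	return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}

static bool example_sock_final_rec(const struct svc_sock *svsk)
{
	/* The top bit flags the last fragment of a record (RFC 5531). */
	return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
#endif
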
static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
	struct rpc_rqst *req = NULL;
	struct kvec *src, *dst;
	__be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	__be32 xid;
	__be32 calldir;

	xid = *p++;
	calldir = *p;

	if (!bc_xprt)
		return -EAGAIN;
	spin_lock(&bc_xprt->queue_lock);
	req = xprt_lookup_rqst(bc_xprt, xid);
	if (!req)
		goto unlock_notfound;

	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	/*
	 * XXX!: cheating for now! Only copying HEAD.
	 * But we know this is good enough for now (in fact, for any
	 * callback reply in the foreseeable future).
	 */
	dst = &req->rq_private_buf.head[0];
	src = &rqstp->rq_arg.head[0];
	if (dst->iov_len < src->iov_len)
		goto unlock_eagain; /* whatever; just giving up. */
	memcpy(dst->iov_base, src->iov_base, src->iov_len);
	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
	rqstp->rq_arg.len = 0;
	spin_unlock(&bc_xprt->queue_lock);
	return 0;
unlock_notfound:
	printk(KERN_NOTICE
		"%s: Got unrecognized reply: "
		"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
		__func__, ntohl(calldir),
		bc_xprt, ntohl(xid));
unlock_eagain:
	spin_unlock(&bc_xprt->queue_lock);
	return -EAGAIN;
}

static void svc_tcp_fragment_received(struct svc_sock *svsk)
{
	/* If we have more data, signal svc_xprt_enqueue() to try again */
	svsk->sk_tcplen = 0;
	svsk->sk_marker = xdr_zero;
}

/**
 * svc_tcp_recvfrom - Receive data from a TCP socket
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Read the 4-byte stream record marker, then use the record length
 * in that marker to set up exactly the resources needed to receive
 * the next RPC message into @rqstp.
 *
 * Returns:
 *   On success, the number of bytes in a received RPC Call, or
 *   %0 if a complete RPC Call message was not ready to return
 *
 * The zero return case handles partial receives and callback Replies.
 * The state of a partial receive is preserved in the svc_sock for
 * the next call to svc_tcp_recvfrom.
 */
static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	size_t want, base;
	ssize_t len;
	__be32 *p;
	__be32 calldir;

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	len = svc_tcp_read_marker(svsk, rqstp);
	if (len < 0)
		goto error;

	base = svc_tcp_restore_pages(svsk, rqstp);
	want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
	len = svc_tcp_read_msg(rqstp, base + want, base);
	if (len >= 0) {
		trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
		svsk->sk_tcplen += len;
		svsk->sk_datalen += len;
	}
	if (len != want || !svc_sock_final_rec(svsk))
		goto err_incomplete;
	if (svsk->sk_datalen < 8)
		goto err_nuts;

	rqstp->rq_arg.len = svsk->sk_datalen;
	rqstp->rq_arg.page_base = 0;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else
		rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;

	rqstp->rq_xprt_ctxt = NULL;
	rqstp->rq_prot = IPPROTO_TCP;
	if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
		set_bit(RQ_LOCAL, &rqstp->rq_flags);
	else
		clear_bit(RQ_LOCAL, &rqstp->rq_flags);

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	calldir = p[1];
	if (calldir)
		len = receive_cb_reply(svsk, rqstp);

	/* Reset TCP read info */
	svsk->sk_datalen = 0;
	svc_tcp_fragment_received(svsk);

	if (len < 0)
		goto error;

	svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	svc_xprt_received(rqstp->rq_xprt);
	return rqstp->rq_arg.len;

err_incomplete:
	svc_tcp_save_pages(svsk, rqstp);
	if (len < 0 && len != -EAGAIN)
		goto err_delete;
	if (len == want)
		svc_tcp_fragment_received(svsk);
	else
		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
				svc_sock_reclen(svsk),
				svsk->sk_tcplen - sizeof(rpc_fraghdr));
	goto err_noclose;
error:
	if (len != -EAGAIN)
		goto err_delete;
	trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
	goto err_noclose;
err_nuts:
	svsk->sk_datalen = 0;
err_delete:
	trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_noclose:
	svc_xprt_received(rqstp->rq_xprt);
	return 0;	/* record not complete */
}

static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
			     int flags)
{
	return kernel_sendpage(sock, virt_to_page(vec->iov_base),
			       offset_in_page(vec->iov_base),
			       vec->iov_len, flags);
}

/*
 * kernel_sendpage() is used exclusively to reduce the number of
 * copy operations in this path. Therefore the caller must ensure
 * that the pages backing @xdr are unchanging.
 *
 * In addition, the logic assumes that .bv_len is never larger
 * than PAGE_SIZE.
 */
static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
			   rpc_fraghdr marker, unsigned int *sentp)
{
	const struct kvec *head = xdr->head;
	const struct kvec *tail = xdr->tail;
	struct kvec rm = {
		.iov_base	= &marker,
		.iov_len	= sizeof(marker),
	};
	struct msghdr msg = {
		.msg_flags	= 0,
	};
	int ret;

	*sentp = 0;
	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
	if (ret < 0)
		return ret;
	*sentp += ret;
	if (ret != rm.iov_len)
		return -EAGAIN;

	ret = svc_tcp_send_kvec(sock, head, 0);
	if (ret < 0)
		return ret;
	*sentp += ret;
	if (ret != head->iov_len)
		goto out;

	if (xdr->page_len) {
		unsigned int offset, len, remaining;
		struct bio_vec *bvec;

		bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
		offset = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining > 0) {
			len = min(remaining, bvec->bv_len - offset);
			ret = kernel_sendpage(sock, bvec->bv_page,
					      bvec->bv_offset + offset,
					      len, 0);
			if (ret < 0)
				return ret;
			*sentp += ret;
			if (ret != len)
				goto out;
			remaining -= len;
			offset = 0;
			bvec++;
		}
	}

	if (tail->iov_len) {
		ret = svc_tcp_send_kvec(sock, tail, 0);
		if (ret < 0)
			return ret;
		*sentp += ret;
	}

out:
	return 0;
}

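/*
 * On the wire, a reply built by svc_tcp_sendmsg() above is laid out as
 *
 *	+--------+-----------+--------------+-----------+
 *	| marker | xdr->head | page payload | xdr->tail |
 *	+--------+-----------+--------------+-----------+
 *
 * where the 4-byte record marker is supplied by the caller. A short
 * send simply returns with *sentp holding the count actually queued;
 * svc_tcp_sendto() below treats any shortfall as fatal and closes
 * the connection.
 */
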
/**
 * svc_tcp_sendto - Send out a reply on a TCP socket
 * @rqstp: completed svc_rqst
 *
 * xpt_mutex ensures @rqstp's whole message is written to the socket
 * without interruption.
 *
 * Returns the number of bytes sent, or a negative errno.
 */
static int svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svc_sock	*svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct xdr_buf *xdr = &rqstp->rq_res;
	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
					 (u32)xdr->len);
	unsigned int sent;
	int err;

	svc_tcp_release_rqst(rqstp);

	atomic_inc(&svsk->sk_sendqlen);
	mutex_lock(&xprt->xpt_mutex);
	if (svc_xprt_is_dead(xprt))
		goto out_notconn;
	tcp_sock_set_cork(svsk->sk_sk, true);
	err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
	xdr_free_bvec(xdr);
	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
	if (err < 0 || sent != (xdr->len + sizeof(marker)))
		goto out_close;
	if (atomic_dec_and_test(&svsk->sk_sendqlen))
		tcp_sock_set_cork(svsk->sk_sk, false);
	mutex_unlock(&xprt->xpt_mutex);
	return sent;

out_notconn:
	atomic_dec(&svsk->sk_sendqlen);
	mutex_unlock(&xprt->xpt_mutex);
	return -ENOTCONN;
out_close:
	pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		  xprt->xpt_server->sv_name,
		  (err < 0) ? "got error" : "sent",
		  (err < 0) ? err : sent, xdr->len);
	svc_xprt_deferred_close(xprt);
	atomic_dec(&svsk->sk_sendqlen);
	mutex_unlock(&xprt->xpt_mutex);
	return -EAGAIN;
}

static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
				       struct net *net,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}

static const struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_result_payload = svc_sock_result_payload,
	.xpo_release_rqst = svc_tcp_release_rqst,
	.xpo_detach = svc_tcp_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
	.xpo_secure_port = svc_sock_secure_port,
	.xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
	.xcl_ident = XPRT_TRANSPORT_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}

static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	struct sock	*sk = svsk->sk_sk;

	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
		      &svsk->sk_xprt, serv);
	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
	if (sk->sk_state == TCP_LISTEN) {
		strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	} else {
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_marker = xdr_zero;
		svsk->sk_tcplen = 0;
		svsk->sk_datalen = 0;
		memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));

		tcp_sock_set_nodelay(sk);

		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		switch (sk->sk_state) {
		case TCP_SYN_RECV:
		case TCP_ESTABLISHED:
			break;
		default:
			svc_xprt_deferred_close(&svsk->sk_xprt);
		}
	}
}

void svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct svc_sock *svsk;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	spin_unlock_bh(&serv->sv_lock);
}
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);

/*
 * Initialize socket for RPC use and create svc_sock struct
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
					 struct socket *sock,
					 int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		err = 0;

	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
	if (!svsk)
		return ERR_PTR(-ENOMEM);

	inet = sock->sk;

	/* Register socket with portmapper */
	if (pmap_register)
		err = svc_register(serv, sock_net(sock->sk), inet->sk_family,
				   inet->sk_protocol,
				   ntohs(inet_sk(inet)->inet_sport));

	if (err < 0) {
		kfree(svsk);
		return ERR_PTR(err);
	}

	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	/*
	 * This barrier is necessary in order to prevent race condition
	 * with svc_data_ready(), svc_tcp_listen_data_ready() and others
	 * when calling callbacks above.
	 */
	wmb();
	inet->sk_user_data = svsk;

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);

	trace_svcsock_new_socket(sock);
	return svsk;
}

bool svc_alien_sock(struct net *net, int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);
	bool ret = false;

	if (!sock)
		goto out;
	if (sock_net(sock->sk) != net)
		ret = true;
	sockfd_put(sock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(svc_alien_sock);

/**
 * svc_addsock - add a listener socket to an RPC service
 * @serv: pointer to RPC service to which to add a new listener
 * @fd: file descriptor of the new listener
 * @name_return: pointer to buffer to fill in with name of listener
 * @len: size of the buffer
 * @cred: credential
 *
 * Fills in socket name and returns positive length of name if successful.
 * Name is terminated with '\n'. On error, returns a negative errno
 * value.
 */
int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
		const size_t len, const struct cred *cred)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;
	struct sockaddr_storage addr;
	struct sockaddr *sin = (struct sockaddr *)&addr;
	int salen;

	if (!so)
		return err;
	err = -EAFNOSUPPORT;
	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
		goto out;
	err = -EPROTONOSUPPORT;
	if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		goto out;
	err = -EISCONN;
	if (so->state > SS_UNCONNECTED)
		goto out;
	err = -ENOENT;
	if (!try_module_get(THIS_MODULE))
		goto out;
	svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS);
	if (IS_ERR(svsk)) {
		module_put(THIS_MODULE);
		err = PTR_ERR(svsk);
		goto out;
	}
	salen = kernel_getsockname(svsk->sk_sock, sin);
	if (salen >= 0)
		svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
	svsk->sk_xprt.xpt_cred = get_cred(cred);
	svc_add_new_perm_xprt(serv, &svsk->sk_xprt);
	return svc_one_sock_name(svsk, name_return, len);
out:
	sockfd_put(so);
	return err;
}
EXPORT_SYMBOL_GPL(svc_addsock);

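/*
 * A hypothetical caller's view of svc_addsock() (sketch only; in the
 * real code the fd arrives via a write to an nfsdfs file):
 */
#if 0
	char name[RPC_MAX_ADDRBUFLEN];	/* hypothetical buffer size */
	int len;

	len = svc_addsock(serv, fd, name, sizeof(name), cred);
	if (len < 0)
		return len;	/* e.g. -EAFNOSUPPORT, -EISCONN */
	/* name now holds something like "ipv4 tcp 192.0.2.1 2049\n" */
#endif
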
/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct net *net,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	struct sockaddr_storage addr;
	struct sockaddr *newsin = (struct sockaddr *)&addr;
	int		newlen;
	int		family;

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP sockets supported\n");
		return ERR_PTR(-EINVAL);
	}

	type = (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM;
	switch (sin->sa_family) {
	case AF_INET6:
		family = PF_INET6;
		break;
	case AF_INET:
		family = PF_INET;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	error = __sock_create(net, family, type, protocol, &sock, 1);
	if (error < 0)
		return ERR_PTR(error);

	svc_reclassify_socket(sock);

	/*
	 * If this is a PF_INET6 listener, we want to avoid
	 * getting requests from IPv4 remotes. Those should
	 * be shunted to a PF_INET listener via rpcbind.
	 */
	if (family == PF_INET6)
		ip6_sock_set_v6only(sock->sk);
	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	error = kernel_getsockname(sock, newsin);
	if (error < 0)
		goto bummer;
	newlen = error;

	if (protocol == IPPROTO_TCP) {
		error = kernel_listen(sock, 64);
		if (error < 0)
			goto bummer;
	}

	svsk = svc_setup_socket(serv, sock, flags);
	if (IS_ERR(svsk)) {
		error = PTR_ERR(svsk);
		goto bummer;
	}
	svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
	return (struct svc_xprt *)svsk;
bummer:
	sock_release(sock);
	return ERR_PTR(error);
}

/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	/* put back the old socket callbacks */
	lock_sock(sk);
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
	sk->sk_user_data = NULL;
	release_sock(sk);
}

/*
 * Disconnect the socket, and reset the callbacks
 */
static void svc_tcp_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	svc_sock_detach(xprt);

	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		svc_tcp_clear_pages(svsk);
		kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
	}
}

/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}