// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

static void __mptcp_close(struct sock *sk, long timeout);

static const struct proto_ops *tcp_proto_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &inet6_stream_ops;
#endif
	return &inet_stream_ops;
}

/* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing
 * socket->sk and stream ops and destroying msk
 * return the msk socket, as we can't access msk anymore after this function
 * completes
 * Called with msk lock held, releases such lock before returning
 */
static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
					      struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct socket *sock;
	struct sock *sk;

	sk = (struct sock *)msk;
	sock = sk->sk_socket;
	subflow = mptcp_subflow_ctx(ssk);

	/* detach the msk socket */
	list_del_init(&subflow->node);
	sock_orphan(sk);
	sock->sk = NULL;

	/* socket is now TCP */
	lock_sock(ssk);
	sock_graft(ssk, sock);
	if (subflow->conn) {
		/* We can't release the ULP data on a live socket,
		 * restore the tcp callback
		 */
		mptcp_subflow_tcp_fallback(ssk, subflow);
		sock_put(subflow->conn);
		subflow->conn = NULL;
	}
	release_sock(ssk);
	sock->ops = tcp_proto_ops(ssk);

	/* destroy the left-over msk sock */
	__mptcp_close(sk, 0);
	return sock;
}

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
{
	return msk->first && !sk_is_mptcp(msk->first);
}
/* if the mp_capable handshake has failed, fall back msk to plain TCP,
 * release the socket lock and return a reference to the now-TCP socket.
 * Otherwise return NULL
 */
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

	if (msk->subflow) {
		/* the first subflow is an active connection, discard the
		 * paired socket
		 */
		msk->subflow->sk = NULL;
		sock_release(msk->subflow);
		msk->subflow = NULL;
	}

	return __mptcp_fallback_to_tcp(msk, msk->first);
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return !msk->first;
}

static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
					     const struct sk_buff *skb,
					     const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == msk->write_seq;
}

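/* Transmit at most one fragment of @msg over the subflow @ssk: copy the data
 * into the MPTCP-level page frag, push it via do_tcp_sendpages() and attach
 * (or extend) the MPTCP mapping carried by the SKB_EXT_MPTCP extension.
 * Returns the number of bytes handed to the TCP stack, 0 if a fallback to
 * plain TCP became necessary while waiting for memory, or a negative error.
 */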
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, ret;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	struct sk_buff *skb, *tail;
	bool can_collapse = false;
	struct page_frag *pfrag;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 */
	pfrag = sk_page_frag(sk);
	while (!sk_page_frag_refill(ssk, pfrag) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;
		if (unlikely(__mptcp_needs_tcp_fallback(msk)))
			return 0;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(msk, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}
	psize = min_t(size_t, pfrag->size - pfrag->offset, avail_size);

	/* Copy to page */
	pr_debug("left=%zu", msg_data_left(msg));
	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
				    min_t(size_t, msg_data_left(msg), psize),
				    &msg->msg_iter);
	pr_debug("left=%zu", msg_data_left(msg));
	if (!psize)
		return -EINVAL;

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;
	if (unlikely(ret < psize))
		iov_iter_revert(&msg->msg_iter, psize - ret);

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = msk->write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	pfrag->offset += ret;
	msk->write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);

	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		return ret >= 0 ? ret + copied : (copied ? copied : ret);
	}

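	/* data is transmitted on a single subflow for now: pick the first
	 * subflow on the conn_list and push all fragments there
	 */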
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;
		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
			release_sock(ssk);
			ssock = __mptcp_tcp_fallback(msk);
			goto fallback;
		}

		copied += ret;
	}

	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
	release_sock(sk);
	return ret;
}

int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
		     unsigned int offset, size_t len)
{
	struct mptcp_read_arg *arg = desc->arg.data;
	size_t copy_len;

	copy_len = min(desc->count, len);

	if (likely(arg->msg)) {
		int err;

		err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
		if (err) {
			pr_debug("error path");
			desc->error = err;
			return err;
		}
	} else {
		pr_debug("Flushing skb payload");
	}

	desc->count -= copy_len;

	pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	bool more_data_avail = false;
	struct mptcp_read_arg arg;
	read_descriptor_t desc;
	bool wait_data = false;
	struct socket *ssock;
	struct tcp_sock *tp;
	bool done = false;
	struct sock *ssk;
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		return copied;
	}

	arg.msg = msg;
	desc.arg.data = &arg;
	desc.error = 0;

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	while (!done) {
		u32 map_remaining;
		int bytes_read;

		ssk = mptcp_subflow_recv_lookup(msk);
		pr_debug("msk=%p ssk=%p", msk, ssk);
		if (!ssk)
			goto wait_for_data;

		subflow = mptcp_subflow_ctx(ssk);
		tp = tcp_sk(ssk);

		lock_sock(ssk);
		do {
			/* try to read as much data as available */
			map_remaining = subflow->map_data_len -
					mptcp_subflow_get_map_offset(subflow);
			desc.count = min_t(size_t, len - copied, map_remaining);
			pr_debug("reading %zu bytes, copied %d", desc.count,
				 copied);
			bytes_read = tcp_read_sock(ssk, &desc,
						   mptcp_read_actor);
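			/* read errors are propagated to the caller only when
			 * nothing has been copied yet
			 */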
			if (bytes_read < 0) {
				if (!copied)
					copied = bytes_read;
				done = true;
				goto next;
			}

			pr_debug("msk ack_seq=%llx -> %llx", msk->ack_seq,
				 msk->ack_seq + bytes_read);
			msk->ack_seq += bytes_read;
			copied += bytes_read;
			if (copied >= len) {
				done = true;
				goto next;
			}
			if (tp->urg_data && tp->urg_seq == tp->copied_seq) {
				pr_err("Urgent data present, cannot proceed");
				done = true;
				goto next;
			}
next:
			more_data_avail = mptcp_subflow_data_available(ssk);
		} while (more_data_avail && !done);
		release_sock(ssk);
		continue;

wait_for_data:
		more_data_avail = false;

		/* only the master socket status is relevant here. The exit
		 * conditions closely mirror tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		wait_data = true;
		mptcp_wait_data(sk, &timeo);
		if (unlikely(__mptcp_tcp_fallback(msk)))
			goto fallback;
	}

	if (more_data_avail) {
		if (!test_bit(MPTCP_DATA_READY, &msk->flags))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (!wait_data) {
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might get new data after last
		 * data_available() returns false.
		 */
		ssk = mptcp_subflow_recv_lookup(msk);
		if (unlikely(ssk))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	}

	release_sock(sk);
	return copied;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);

	msk->first = NULL;

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	if (!mptcp_is_enabled(sock_net(sk)))
		return -ENOPROTOOPT;

	return __mptcp_init_sock(sk);
}

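/* shut down a single subflow: a listener is torn down only on RCV_SHUTDOWN,
 * a SYN_SENT socket is disconnected, established ones get a plain TCP shutdown
 */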
static void mptcp_subflow_shutdown(struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

/* Called with msk lock held, releases such lock before returning */
static void __mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	list_splice_init(&msk->conn_list, &conn_list);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	sk_common_release(sk);
}

static void mptcp_close(struct sock *sk, long timeout)
{
	lock_sock(sk);
	__mptcp_close(sk, timeout);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

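/* Accept a connection on the initial (listener) subflow. When the peer
 * negotiated MP_CAPABLE, clone the listening msk into a new MPTCP socket and
 * attach the just-accepted tcp socket as its first subflow; otherwise the
 * plain tcp socket is returned as-is.
 */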
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;
		u64 ack_seq;

		subflow = mptcp_subflow_ctx(newsk);
		lock_sock(sk);

		local_bh_disable();
		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
			tcp_close(newsk, 0);
			return NULL;
		}

		__mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;
		msk->first = newsk;

		mptcp_token_update_accept(newsk, new_mptcp_sock);

		msk->write_seq = subflow->idsn + 1;
		if (subflow->can_ack) {
			msk->can_ack = true;
			msk->remote_key = subflow->remote_key;
			mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
			ack_seq++;
			msk->ack_seq = ack_seq;
		}
		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);

		/* the subflow can already receive packets, avoid racing with
		 * the receive path and process the pending ones
		 */
		lock_sock(ssk);
		subflow->rel_write_seq = 1;
		subflow->tcp_sock = ssk;
		subflow->conn = new_mptcp_sock;
		if (unlikely(!skb_queue_empty(&ssk->sk_receive_queue)))
			mptcp_subflow_data_available(ssk);
		release_sock(ssk);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int ret = -EOPNOTSUPP;
	struct socket *ssock;
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		release_sock(sk);
		return ret;
	}

	ssk = ssock->sk;
	sock_hold(ssk);
	release_sock(sk);

	ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
	sock_put(ssk);

	return ret;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int ret = -EOPNOTSUPP;
	struct socket *ssock;
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		release_sock(sk);
		return ret;
	}

	ssk = ssock->sk;
	sock_hold(ssk);
	release_sock(sk);

	ret = tcp_getsockopt(ssk, level, optname, optval, option);
	sock_put(ssk);

	return ret;
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);

	if (!subflow->mp_capable)
		return;

	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access or
	 * race on the fields below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

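/* .stream_memory_free hook for the MPTCP socket: for wake-up checks,
 * writeability is tracked via the MPTCP_SEND_SPACE flag, which is cleared by
 * ssk_check_wmem() when the subflow runs out of write memory
 */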
static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.stream_memory_free	= mptcp_memory_free,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}

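/* socket-level accept(): let the first subflow's listener socket perform the
 * accept and, for MP_CAPABLE flows, graft the subflow sockets onto the new
 * MPTCP socket before marking it established
 */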
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock))
		return ssock->ops->poll(file, ssock, NULL);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}

static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v4_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

void mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	mptcp_subflow_init();
	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
				 sizeof(struct ipv6_pinfo);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif