// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

static void __mptcp_close(struct sock *sk, long timeout);

static const struct proto_ops *tcp_proto_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return &inet6_stream_ops;
#endif
	return &inet_stream_ops;
}

/* MP_CAPABLE handshake failed, convert msk to plain TCP, replacing
 * socket->sk and stream ops and destroying msk.
 * Return the msk socket, as we can't access msk anymore after this function
 * completes.
 * Called with msk lock held, releases such lock before returning.
 */
static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
					      struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct socket *sock;
	struct sock *sk;

	sk = (struct sock *)msk;
	sock = sk->sk_socket;
	subflow = mptcp_subflow_ctx(ssk);

	/* detach the msk socket */
	list_del_init(&subflow->node);
	sock_orphan(sk);
	sock->sk = NULL;

	/* socket is now TCP */
	lock_sock(ssk);
	sock_graft(ssk, sock);
	if (subflow->conn) {
		/* We can't release the ULP data on a live socket,
		 * restore the tcp callbacks
		 */
		mptcp_subflow_tcp_fallback(ssk, subflow);
		sock_put(subflow->conn);
		subflow->conn = NULL;
	}
	release_sock(ssk);
	sock->ops = tcp_proto_ops(ssk);

	/* destroy the left-over msk sock */
	__mptcp_close(sk, 0);
	return sock;
}

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
{
	return msk->first && !sk_is_mptcp(msk->first);
}

/* If the MP_CAPABLE handshake has failed, fall back msk to plain TCP,
 * release the socket lock and return a reference to the now-TCP socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

	if (msk->subflow) {
		/* the first subflow is an active connection, discard the
		 * paired socket
		 */
		msk->subflow->sk = NULL;
		sock_release(msk->subflow);
		msk->subflow = NULL;
	}

	return __mptcp_fallback_to_tcp(msk, msk->first);
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return !msk->first;
}
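/* Return the initial (MP_CAPABLE) subflow socket, creating it on demand and
 * linking it into the msk conn_list. Optionally moves the msk to @state.
 * Callers hold the msk socket lock.
 */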
static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
					     const struct sk_buff *skb,
					     const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == msk->write_seq;
}
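/* Push at most one new fragment of @msg onto subflow @ssk, attaching or
 * extending the MPTCP DSS mapping carried by the skb extension.
 * Returns the number of bytes queued, 0 if a TCP fallback is needed, or a
 * negative error code.
 */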
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, ret;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	struct sk_buff *skb, *tail;
	bool can_collapse = false;
	struct page_frag *pfrag;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 */
	pfrag = sk_page_frag(sk);
	while (!sk_page_frag_refill(ssk, pfrag) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;
		if (unlikely(__mptcp_needs_tcp_fallback(msk)))
			return 0;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(msk, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}
	psize = min_t(size_t, pfrag->size - pfrag->offset, avail_size);

	/* Copy to page */
	pr_debug("left=%zu", msg_data_left(msg));
	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
				    min_t(size_t, msg_data_left(msg), psize),
				    &msg->msg_iter);
	pr_debug("left=%zu", msg_data_left(msg));
	if (!psize)
		return -EINVAL;

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;
	if (unlikely(ret < psize))
		iov_iter_revert(&msg->msg_iter, psize - ret);

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = msk->write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	pfrag->offset += ret;
	msk->write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}
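/* If the subflow has run out of write space, clear MPTCP_SEND_SPACE and set
 * SOCK_NOSPACE on the msk socket so that mptcp_poll() stops reporting the
 * socket as writable.
 */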
static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);

	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		return ret >= 0 ? ret + copied : (copied ? copied : ret);
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;
		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
			release_sock(ssk);
			ssock = __mptcp_tcp_fallback(msk);
			goto fallback;
		}

		copied += ret;
	}

	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
	release_sock(sk);
	return ret;
}

int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
		     unsigned int offset, size_t len)
{
	struct mptcp_read_arg *arg = desc->arg.data;
	size_t copy_len;

	copy_len = min(desc->count, len);

	if (likely(arg->msg)) {
		int err;

		err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
		if (err) {
			pr_debug("error path");
			desc->error = err;
			return err;
		}
	} else {
		pr_debug("Flushing skb payload");
	}

	desc->count -= copy_len;

	pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}
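/* Receive path: drain in-order data from whichever subflow currently has data
 * available, advancing the msk-level ack_seq. The blocking/exit conditions
 * mirror tcp_recvmsg() on the msk socket.
 */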
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	bool more_data_avail = false;
	struct mptcp_read_arg arg;
	read_descriptor_t desc;
	bool wait_data = false;
	struct socket *ssock;
	struct tcp_sock *tp;
	bool done = false;
	struct sock *ssk;
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		return copied;
	}

	arg.msg = msg;
	desc.arg.data = &arg;
	desc.error = 0;

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	while (!done) {
		u32 map_remaining;
		int bytes_read;

		ssk = mptcp_subflow_recv_lookup(msk);
		pr_debug("msk=%p ssk=%p", msk, ssk);
		if (!ssk)
			goto wait_for_data;

		subflow = mptcp_subflow_ctx(ssk);
		tp = tcp_sk(ssk);

		lock_sock(ssk);
		do {
			/* try to read as much data as available */
			map_remaining = subflow->map_data_len -
					mptcp_subflow_get_map_offset(subflow);
			desc.count = min_t(size_t, len - copied, map_remaining);
			pr_debug("reading %zu bytes, copied %d", desc.count,
				 copied);
			bytes_read = tcp_read_sock(ssk, &desc,
						   mptcp_read_actor);
			if (bytes_read < 0) {
				if (!copied)
					copied = bytes_read;
				done = true;
				goto next;
			}

			pr_debug("msk ack_seq=%llx -> %llx", msk->ack_seq,
				 msk->ack_seq + bytes_read);
			msk->ack_seq += bytes_read;
			copied += bytes_read;
			if (copied >= len) {
				done = true;
				goto next;
			}
			if (tp->urg_data && tp->urg_seq == tp->copied_seq) {
				pr_err("Urgent data present, cannot proceed");
				done = true;
				goto next;
			}
next:
			more_data_avail = mptcp_subflow_data_available(ssk);
		} while (more_data_avail && !done);
		release_sock(ssk);
		continue;

wait_for_data:
		more_data_avail = false;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		wait_data = true;
		mptcp_wait_data(sk, &timeo);
		if (unlikely(__mptcp_tcp_fallback(msk)))
			goto fallback;
	}

	if (more_data_avail) {
		if (!test_bit(MPTCP_DATA_READY, &msk->flags))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (!wait_data) {
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might get new data after last
		 * data_available() returns false.
		 */
		ssk = mptcp_subflow_recv_lookup(msk);
		if (unlikely(ssk))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	}

	release_sock(sk);
	return copied;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);

	msk->first = NULL;

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	if (!mptcp_is_enabled(sock_net(sk)))
		return -ENOPROTOOPT;

	return __mptcp_init_sock(sk);
}
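/* Propagate a shutdown request to a single subflow: listening and not yet
 * connected subflows are disconnected, established ones get a regular
 * tcp_shutdown().
 */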
static void mptcp_subflow_shutdown(struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

/* Called with msk lock held, releases such lock before returning */
static void __mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);
	release_sock(sk);
	sk_common_release(sk);
}

static void mptcp_close(struct sock *sk, long timeout)
{
	lock_sock(sk);
	__mptcp_close(sk, timeout);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
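/* Accept a connection on the first subflow listener. If the peer negotiated
 * MP_CAPABLE, clone the listening msk into a new MPTCP socket, attach the
 * accepted TCP socket as its first subflow and return the new msk; otherwise
 * return the plain TCP socket.
 */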
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;
		u64 ack_seq;

		subflow = mptcp_subflow_ctx(newsk);
		lock_sock(sk);

		local_bh_disable();
		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
			tcp_close(newsk, 0);
			return NULL;
		}

		__mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;
		msk->first = newsk;

		mptcp_token_update_accept(newsk, new_mptcp_sock);

		msk->write_seq = subflow->idsn + 1;
		if (subflow->can_ack) {
			msk->can_ack = true;
			msk->remote_key = subflow->remote_key;
			mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
			ack_seq++;
			msk->ack_seq = ack_seq;
		}
		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);

		/* the subflow can already receive packets, avoid racing with
		 * the receive path and process the pending ones
		 */
		lock_sock(ssk);
		subflow->rel_write_seq = 1;
		subflow->tcp_sock = ssk;
		subflow->conn = new_mptcp_sock;
		if (unlikely(!skb_queue_empty(&ssk->sk_receive_queue)))
			mptcp_subflow_data_available(ssk);
		release_sock(ssk);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	/* will be treated as __user in tcp_setsockopt */
	optval = (char __kernel __force *)uoptval;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_setsockopt(ssock, level, optname, optval, optlen);
	}
	release_sock(sk);

	return ret;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, int __user *uoption)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	int __kernel *option;
	struct socket *ssock;

	/* will be treated as __user in tcp_getsockopt */
	optval = (char __kernel __force *)uoptval;
	option = (int __kernel __force *)uoption;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_getsockopt(ssock, level, optname, optval, option);
	}
	release_sock(sk);

	return ret;
}
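/* get_port() is delegated to the initial subflow socket: the port is
 * reserved on the TCP subflow rather than on the MPTCP socket itself.
 */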
static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}
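/* Once the MP_CAPABLE handshake completed on an outgoing subflow, copy keys
 * and token to the msk and derive the initial MPTCP-level sequence numbers.
 */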
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);

	if (!subflow->mp_capable)
		return;

	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access or
	 * race with writes to the fields below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.stream_memory_free	= mptcp_memory_free,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}
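/* connect() creates the initial MP_CAPABLE subflow (moving the msk to
 * TCP_SYN_SENT) and delegates the actual connect to it, then mirrors the
 * subflow state and addresses on the MPTCP socket.
 */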
static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}
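/* accept() runs on the first subflow listener. For MP_CAPABLE peers the new
 * socket returned by mptcp_accept() is an msk: graft its subflows to the new
 * 'struct socket' and mark it established.
 */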
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}
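/* poll() reports readability and writability from the MPTCP_DATA_READY and
 * MPTCP_SEND_SPACE flags. Before the MPTCP connection is established, or
 * after a fallback, the first subflow's poll() is used instead.
 */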
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock))
		return ssock->ops->poll(file, ssock, NULL);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}

static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v4_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

void mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
				 sizeof(struct ipv6_pinfo);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif
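/* For reference, a minimal user-space usage sketch (error handling omitted):
 * the protosw entries above expose MPTCP as SOCK_STREAM/IPPROTO_MPTCP, so an
 * application would open an MPTCP connection roughly like this:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * If the peer does not negotiate MP_CAPABLE, the connection falls back to
 * plain TCP via __mptcp_tcp_fallback().
 */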