// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/xfrm.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u64 map_seq;
	u64 end_seq;
	u32 offset;
	u8  has_rxtstamp:1;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

enum {
	MPTCP_CMSG_TS = BIT(0),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	mptcp_sock_graft(msk->first, sk->sk_socket);

	return 0;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

static void mptcp_rmem_charge(struct sock *sk, int size)
{
	mptcp_sk(sk)->rmem_fwd_alloc -= size;
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
	kfree_skb_partial(from, fragstolen);
	atomic_add(delta, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, delta);
	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	mptcp_sk(sk)->rmem_fwd_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}

static void mptcp_rmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int reclaimable;

	msk->rmem_fwd_alloc += size;
	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);

	/* see sk_mem_uncharge() for the rationale behind the following schema */
	if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
		__mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
}

static void mptcp_rfree(struct sk_buff *skb)
{
	unsigned int len = skb->truesize;
	struct sock *sk = skb->sk;

	atomic_sub(len, &sk->sk_rmem_alloc);
	mptcp_rmem_uncharge(sk, len);
}

static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = mptcp_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, skb->truesize);
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = READ_ONCE(msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)msk->rcv_wnd_sent);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely.
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |    skb     |
				 *  |    skb1   |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb !
*/ 308 if (!skb1) 309 msk->ooo_last_skb = skb; 310 311 end: 312 skb_condense(skb); 313 mptcp_set_owner_r(skb, sk); 314 } 315 316 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) 317 { 318 struct mptcp_sock *msk = mptcp_sk(sk); 319 int amt, amount; 320 321 if (size < msk->rmem_fwd_alloc) 322 return true; 323 324 amt = sk_mem_pages(size); 325 amount = amt << SK_MEM_QUANTUM_SHIFT; 326 msk->rmem_fwd_alloc += amount; 327 if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) { 328 if (ssk->sk_forward_alloc < amount) { 329 msk->rmem_fwd_alloc -= amount; 330 return false; 331 } 332 333 ssk->sk_forward_alloc -= amount; 334 } 335 return true; 336 } 337 338 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, 339 struct sk_buff *skb, unsigned int offset, 340 size_t copy_len) 341 { 342 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 343 struct sock *sk = (struct sock *)msk; 344 struct sk_buff *tail; 345 bool has_rxtstamp; 346 347 __skb_unlink(skb, &ssk->sk_receive_queue); 348 349 skb_ext_reset(skb); 350 skb_orphan(skb); 351 352 /* try to fetch required memory from subflow */ 353 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) 354 goto drop; 355 356 has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; 357 358 /* the skb map_seq accounts for the skb offset: 359 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq 360 * value 361 */ 362 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); 363 MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; 364 MPTCP_SKB_CB(skb)->offset = offset; 365 MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp; 366 367 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { 368 /* in sequence */ 369 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); 370 tail = skb_peek_tail(&sk->sk_receive_queue); 371 if (tail && mptcp_try_coalesce(sk, tail, skb)) 372 return true; 373 374 mptcp_set_owner_r(skb, sk); 375 __skb_queue_tail(&sk->sk_receive_queue, skb); 376 return true; 377 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { 378 mptcp_data_queue_ofo(msk, skb); 379 return false; 380 } 381 382 /* old data, keep it simple and drop the whole pkt, sender 383 * will retransmit as needed, if needed. 
384 */ 385 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 386 drop: 387 mptcp_drop(sk, skb); 388 return false; 389 } 390 391 static void mptcp_stop_timer(struct sock *sk) 392 { 393 struct inet_connection_sock *icsk = inet_csk(sk); 394 395 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); 396 mptcp_sk(sk)->timer_ival = 0; 397 } 398 399 static void mptcp_close_wake_up(struct sock *sk) 400 { 401 if (sock_flag(sk, SOCK_DEAD)) 402 return; 403 404 sk->sk_state_change(sk); 405 if (sk->sk_shutdown == SHUTDOWN_MASK || 406 sk->sk_state == TCP_CLOSE) 407 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 408 else 409 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 410 } 411 412 static bool mptcp_pending_data_fin_ack(struct sock *sk) 413 { 414 struct mptcp_sock *msk = mptcp_sk(sk); 415 416 return !__mptcp_check_fallback(msk) && 417 ((1 << sk->sk_state) & 418 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && 419 msk->write_seq == READ_ONCE(msk->snd_una); 420 } 421 422 static void mptcp_check_data_fin_ack(struct sock *sk) 423 { 424 struct mptcp_sock *msk = mptcp_sk(sk); 425 426 /* Look for an acknowledged DATA_FIN */ 427 if (mptcp_pending_data_fin_ack(sk)) { 428 WRITE_ONCE(msk->snd_data_fin_enable, 0); 429 430 switch (sk->sk_state) { 431 case TCP_FIN_WAIT1: 432 inet_sk_state_store(sk, TCP_FIN_WAIT2); 433 break; 434 case TCP_CLOSING: 435 case TCP_LAST_ACK: 436 inet_sk_state_store(sk, TCP_CLOSE); 437 break; 438 } 439 440 mptcp_close_wake_up(sk); 441 } 442 } 443 444 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) 445 { 446 struct mptcp_sock *msk = mptcp_sk(sk); 447 448 if (READ_ONCE(msk->rcv_data_fin) && 449 ((1 << sk->sk_state) & 450 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { 451 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); 452 453 if (msk->ack_seq == rcv_data_fin_seq) { 454 if (seq) 455 *seq = rcv_data_fin_seq; 456 457 return true; 458 } 459 } 460 461 return false; 462 } 463 464 static void mptcp_set_datafin_timeout(const struct sock *sk) 465 { 466 struct inet_connection_sock *icsk = inet_csk(sk); 467 468 mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX, 469 TCP_RTO_MIN << icsk->icsk_retransmits); 470 } 471 472 static void __mptcp_set_timeout(struct sock *sk, long tout) 473 { 474 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; 475 } 476 477 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) 478 { 479 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 480 481 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? 
482 inet_csk(ssk)->icsk_timeout - jiffies : 0; 483 } 484 485 static void mptcp_set_timeout(struct sock *sk) 486 { 487 struct mptcp_subflow_context *subflow; 488 long tout = 0; 489 490 mptcp_for_each_subflow(mptcp_sk(sk), subflow) 491 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 492 __mptcp_set_timeout(sk, tout); 493 } 494 495 static bool tcp_can_send_ack(const struct sock *ssk) 496 { 497 return !((1 << inet_sk_state_load(ssk)) & 498 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN)); 499 } 500 501 void mptcp_subflow_send_ack(struct sock *ssk) 502 { 503 bool slow; 504 505 slow = lock_sock_fast(ssk); 506 if (tcp_can_send_ack(ssk)) 507 tcp_send_ack(ssk); 508 unlock_sock_fast(ssk, slow); 509 } 510 511 static void mptcp_send_ack(struct mptcp_sock *msk) 512 { 513 struct mptcp_subflow_context *subflow; 514 515 mptcp_for_each_subflow(msk, subflow) 516 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); 517 } 518 519 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) 520 { 521 bool slow; 522 523 slow = lock_sock_fast(ssk); 524 if (tcp_can_send_ack(ssk)) 525 tcp_cleanup_rbuf(ssk, 1); 526 unlock_sock_fast(ssk, slow); 527 } 528 529 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) 530 { 531 const struct inet_connection_sock *icsk = inet_csk(ssk); 532 u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending); 533 const struct tcp_sock *tp = tcp_sk(ssk); 534 535 return (ack_pending & ICSK_ACK_SCHED) && 536 ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) > 537 READ_ONCE(icsk->icsk_ack.rcv_mss)) || 538 (rx_empty && ack_pending & 539 (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); 540 } 541 542 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) 543 { 544 int old_space = READ_ONCE(msk->old_wspace); 545 struct mptcp_subflow_context *subflow; 546 struct sock *sk = (struct sock *)msk; 547 int space = __mptcp_space(sk); 548 bool cleanup, rx_empty; 549 550 cleanup = (space > 0) && (space >= (old_space << 1)); 551 rx_empty = !__mptcp_rmem(sk); 552 553 mptcp_for_each_subflow(msk, subflow) { 554 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 555 556 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) 557 mptcp_subflow_cleanup_rbuf(ssk); 558 } 559 } 560 561 static bool mptcp_check_data_fin(struct sock *sk) 562 { 563 struct mptcp_sock *msk = mptcp_sk(sk); 564 u64 rcv_data_fin_seq; 565 bool ret = false; 566 567 if (__mptcp_check_fallback(msk)) 568 return ret; 569 570 /* Need to ack a DATA_FIN received from a peer while this side 571 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. 572 * msk->rcv_data_fin was set when parsing the incoming options 573 * at the subflow level and the msk lock was not held, so this 574 * is the first opportunity to act on the DATA_FIN and change 575 * the msk state. 576 * 577 * If we are caught up to the sequence number of the incoming 578 * DATA_FIN, send the DATA_ACK now and do state transition. If 579 * not caught up, do nothing and let the recv code send DATA_ACK 580 * when catching up. 
581 */ 582 583 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { 584 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); 585 WRITE_ONCE(msk->rcv_data_fin, 0); 586 587 sk->sk_shutdown |= RCV_SHUTDOWN; 588 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 589 590 switch (sk->sk_state) { 591 case TCP_ESTABLISHED: 592 inet_sk_state_store(sk, TCP_CLOSE_WAIT); 593 break; 594 case TCP_FIN_WAIT1: 595 inet_sk_state_store(sk, TCP_CLOSING); 596 break; 597 case TCP_FIN_WAIT2: 598 inet_sk_state_store(sk, TCP_CLOSE); 599 break; 600 default: 601 /* Other states not expected */ 602 WARN_ON_ONCE(1); 603 break; 604 } 605 606 ret = true; 607 mptcp_send_ack(msk); 608 mptcp_close_wake_up(sk); 609 } 610 return ret; 611 } 612 613 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, 614 struct sock *ssk, 615 unsigned int *bytes) 616 { 617 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 618 struct sock *sk = (struct sock *)msk; 619 unsigned int moved = 0; 620 bool more_data_avail; 621 struct tcp_sock *tp; 622 bool done = false; 623 int sk_rbuf; 624 625 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); 626 627 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 628 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); 629 630 if (unlikely(ssk_rbuf > sk_rbuf)) { 631 WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); 632 sk_rbuf = ssk_rbuf; 633 } 634 } 635 636 pr_debug("msk=%p ssk=%p", msk, ssk); 637 tp = tcp_sk(ssk); 638 do { 639 u32 map_remaining, offset; 640 u32 seq = tp->copied_seq; 641 struct sk_buff *skb; 642 bool fin; 643 644 /* try to move as much data as available */ 645 map_remaining = subflow->map_data_len - 646 mptcp_subflow_get_map_offset(subflow); 647 648 skb = skb_peek(&ssk->sk_receive_queue); 649 if (!skb) { 650 /* if no data is found, a racing workqueue/recvmsg 651 * already processed the new data, stop here or we 652 * can enter an infinite loop 653 */ 654 if (!moved) 655 done = true; 656 break; 657 } 658 659 if (__mptcp_check_fallback(msk)) { 660 /* if we are running under the workqueue, TCP could have 661 * collapsed skbs between dummy map creation and now 662 * be sure to adjust the size 663 */ 664 map_remaining = skb->len; 665 subflow->map_data_len = skb->len; 666 } 667 668 offset = seq - TCP_SKB_CB(skb)->seq; 669 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 670 if (fin) { 671 done = true; 672 seq++; 673 } 674 675 if (offset < skb->len) { 676 size_t len = skb->len - offset; 677 678 if (tp->urg_data) 679 done = true; 680 681 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) 682 moved += len; 683 seq += len; 684 685 if (WARN_ON_ONCE(map_remaining < len)) 686 break; 687 } else { 688 WARN_ON_ONCE(!fin); 689 sk_eat_skb(ssk, skb); 690 done = true; 691 } 692 693 WRITE_ONCE(tp->copied_seq, seq); 694 more_data_avail = mptcp_subflow_data_available(ssk); 695 696 if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { 697 done = true; 698 break; 699 } 700 } while (more_data_avail); 701 702 *bytes += moved; 703 return done; 704 } 705 706 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) 707 { 708 struct sock *sk = (struct sock *)msk; 709 struct sk_buff *skb, *tail; 710 bool moved = false; 711 struct rb_node *p; 712 u64 end_seq; 713 714 p = rb_first(&msk->out_of_order_queue); 715 pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); 716 while (p) { 717 skb = rb_to_skb(p); 718 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) 719 break; 720 721 p = rb_next(p); 722 rb_erase(&skb->rbnode, &msk->out_of_order_queue); 723 724 if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, 725 msk->ack_seq))) { 
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->ack_seq = end_seq;
		moved = true;
	}
	return moved;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err)) {
		if (!sock_owned_by_user(sk))
			__mptcp_error_report(sk);
		else
			set_bit(MPTCP_ERROR_REPORT, &msk->flags);
	}

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);
	int sk_rbuf, ssk_rbuf;

	/* The peer can send data while we are shutting down this
	 * subflow at msk destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
	if (unlikely(subflow->disposable))
		return;

	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
	if (unlikely(ssk_rbuf > sk_rbuf))
		sk_rbuf = ssk_rbuf;

	/* over limit?
can't append more skbs to msk, Also, no need to wake-up*/ 795 if (__mptcp_rmem(sk) > sk_rbuf) { 796 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); 797 return; 798 } 799 800 /* Wake-up the reader only for in-sequence data */ 801 mptcp_data_lock(sk); 802 if (move_skbs_to_msk(msk, ssk)) 803 sk->sk_data_ready(sk); 804 805 mptcp_data_unlock(sk); 806 } 807 808 static bool mptcp_do_flush_join_list(struct mptcp_sock *msk) 809 { 810 struct mptcp_subflow_context *subflow; 811 bool ret = false; 812 813 if (likely(list_empty(&msk->join_list))) 814 return false; 815 816 spin_lock_bh(&msk->join_list_lock); 817 list_for_each_entry(subflow, &msk->join_list, node) { 818 u32 sseq = READ_ONCE(subflow->setsockopt_seq); 819 820 mptcp_propagate_sndbuf((struct sock *)msk, mptcp_subflow_tcp_sock(subflow)); 821 if (READ_ONCE(msk->setsockopt_seq) != sseq) 822 ret = true; 823 } 824 list_splice_tail_init(&msk->join_list, &msk->conn_list); 825 spin_unlock_bh(&msk->join_list_lock); 826 827 return ret; 828 } 829 830 void __mptcp_flush_join_list(struct mptcp_sock *msk) 831 { 832 if (likely(!mptcp_do_flush_join_list(msk))) 833 return; 834 835 if (!test_and_set_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags)) 836 mptcp_schedule_work((struct sock *)msk); 837 } 838 839 static void mptcp_flush_join_list(struct mptcp_sock *msk) 840 { 841 bool sync_needed = test_and_clear_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags); 842 843 might_sleep(); 844 845 if (!mptcp_do_flush_join_list(msk) && !sync_needed) 846 return; 847 848 mptcp_sockopt_sync_all(msk); 849 } 850 851 static bool mptcp_timer_pending(struct sock *sk) 852 { 853 return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); 854 } 855 856 static void mptcp_reset_timer(struct sock *sk) 857 { 858 struct inet_connection_sock *icsk = inet_csk(sk); 859 unsigned long tout; 860 861 /* prevent rescheduling on close */ 862 if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) 863 return; 864 865 tout = mptcp_sk(sk)->timer_ival; 866 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); 867 } 868 869 bool mptcp_schedule_work(struct sock *sk) 870 { 871 if (inet_sk_state_load(sk) != TCP_CLOSE && 872 schedule_work(&mptcp_sk(sk)->work)) { 873 /* each subflow already holds a reference to the sk, and the 874 * workqueue is invoked by a subflow, so sk can't go away here. 
875 */ 876 sock_hold(sk); 877 return true; 878 } 879 return false; 880 } 881 882 void mptcp_subflow_eof(struct sock *sk) 883 { 884 if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags)) 885 mptcp_schedule_work(sk); 886 } 887 888 static void mptcp_check_for_eof(struct mptcp_sock *msk) 889 { 890 struct mptcp_subflow_context *subflow; 891 struct sock *sk = (struct sock *)msk; 892 int receivers = 0; 893 894 mptcp_for_each_subflow(msk, subflow) 895 receivers += !subflow->rx_eof; 896 if (receivers) 897 return; 898 899 if (!(sk->sk_shutdown & RCV_SHUTDOWN)) { 900 /* hopefully temporary hack: propagate shutdown status 901 * to msk, when all subflows agree on it 902 */ 903 sk->sk_shutdown |= RCV_SHUTDOWN; 904 905 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 906 sk->sk_data_ready(sk); 907 } 908 909 switch (sk->sk_state) { 910 case TCP_ESTABLISHED: 911 inet_sk_state_store(sk, TCP_CLOSE_WAIT); 912 break; 913 case TCP_FIN_WAIT1: 914 inet_sk_state_store(sk, TCP_CLOSING); 915 break; 916 case TCP_FIN_WAIT2: 917 inet_sk_state_store(sk, TCP_CLOSE); 918 break; 919 default: 920 return; 921 } 922 mptcp_close_wake_up(sk); 923 } 924 925 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) 926 { 927 struct mptcp_subflow_context *subflow; 928 struct sock *sk = (struct sock *)msk; 929 930 sock_owned_by_me(sk); 931 932 mptcp_for_each_subflow(msk, subflow) { 933 if (READ_ONCE(subflow->data_avail)) 934 return mptcp_subflow_tcp_sock(subflow); 935 } 936 937 return NULL; 938 } 939 940 static bool mptcp_skb_can_collapse_to(u64 write_seq, 941 const struct sk_buff *skb, 942 const struct mptcp_ext *mpext) 943 { 944 if (!tcp_skb_can_collapse_to(skb)) 945 return false; 946 947 /* can collapse only if MPTCP level sequence is in order and this 948 * mapping has not been xmitted yet 949 */ 950 return mpext && mpext->data_seq + mpext->data_len == write_seq && 951 !mpext->frozen; 952 } 953 954 /* we can append data to the given data frag if: 955 * - there is space available in the backing page_frag 956 * - the data frag tail matches the current page_frag free offset 957 * - the data frag end sequence number matches the current write seq 958 */ 959 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, 960 const struct page_frag *pfrag, 961 const struct mptcp_data_frag *df) 962 { 963 return df && pfrag->page == df->page && 964 pfrag->size - pfrag->offset > 0 && 965 pfrag->offset == (df->offset + df->data_len) && 966 df->data_seq + df->data_len == msk->write_seq; 967 } 968 969 static void __mptcp_mem_reclaim_partial(struct sock *sk) 970 { 971 int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk); 972 973 lockdep_assert_held_once(&sk->sk_lock.slock); 974 975 __mptcp_rmem_reclaim(sk, reclaimable - 1); 976 sk_mem_reclaim_partial(sk); 977 } 978 979 static void mptcp_mem_reclaim_partial(struct sock *sk) 980 { 981 mptcp_data_lock(sk); 982 __mptcp_mem_reclaim_partial(sk); 983 mptcp_data_unlock(sk); 984 } 985 986 static void dfrag_uncharge(struct sock *sk, int len) 987 { 988 sk_mem_uncharge(sk, len); 989 sk_wmem_queued_add(sk, -len); 990 } 991 992 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) 993 { 994 int len = dfrag->data_len + dfrag->overhead; 995 996 list_del(&dfrag->list); 997 dfrag_uncharge(sk, len); 998 put_page(dfrag->page); 999 } 1000 1001 static void __mptcp_clean_una(struct sock *sk) 1002 { 1003 struct mptcp_sock *msk = mptcp_sk(sk); 1004 struct mptcp_data_frag *dtmp, *dfrag; 1005 bool cleaned = false; 1006 u64 snd_una; 1007 
1008 /* on fallback we just need to ignore snd_una, as this is really 1009 * plain TCP 1010 */ 1011 if (__mptcp_check_fallback(msk)) 1012 msk->snd_una = READ_ONCE(msk->snd_nxt); 1013 1014 snd_una = msk->snd_una; 1015 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { 1016 if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) 1017 break; 1018 1019 if (unlikely(dfrag == msk->first_pending)) { 1020 /* in recovery mode can see ack after the current snd head */ 1021 if (WARN_ON_ONCE(!msk->recovery)) 1022 break; 1023 1024 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1025 } 1026 1027 dfrag_clear(sk, dfrag); 1028 cleaned = true; 1029 } 1030 1031 dfrag = mptcp_rtx_head(sk); 1032 if (dfrag && after64(snd_una, dfrag->data_seq)) { 1033 u64 delta = snd_una - dfrag->data_seq; 1034 1035 /* prevent wrap around in recovery mode */ 1036 if (unlikely(delta > dfrag->already_sent)) { 1037 if (WARN_ON_ONCE(!msk->recovery)) 1038 goto out; 1039 if (WARN_ON_ONCE(delta > dfrag->data_len)) 1040 goto out; 1041 dfrag->already_sent += delta - dfrag->already_sent; 1042 } 1043 1044 dfrag->data_seq += delta; 1045 dfrag->offset += delta; 1046 dfrag->data_len -= delta; 1047 dfrag->already_sent -= delta; 1048 1049 dfrag_uncharge(sk, delta); 1050 cleaned = true; 1051 } 1052 1053 /* all retransmitted data acked, recovery completed */ 1054 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) 1055 msk->recovery = false; 1056 1057 out: 1058 if (cleaned && tcp_under_memory_pressure(sk)) 1059 __mptcp_mem_reclaim_partial(sk); 1060 1061 if (snd_una == READ_ONCE(msk->snd_nxt) && 1062 snd_una == READ_ONCE(msk->write_seq)) { 1063 if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) 1064 mptcp_stop_timer(sk); 1065 } else { 1066 mptcp_reset_timer(sk); 1067 } 1068 } 1069 1070 static void __mptcp_clean_una_wakeup(struct sock *sk) 1071 { 1072 lockdep_assert_held_once(&sk->sk_lock.slock); 1073 1074 __mptcp_clean_una(sk); 1075 mptcp_write_space(sk); 1076 } 1077 1078 static void mptcp_clean_una_wakeup(struct sock *sk) 1079 { 1080 mptcp_data_lock(sk); 1081 __mptcp_clean_una_wakeup(sk); 1082 mptcp_data_unlock(sk); 1083 } 1084 1085 static void mptcp_enter_memory_pressure(struct sock *sk) 1086 { 1087 struct mptcp_subflow_context *subflow; 1088 struct mptcp_sock *msk = mptcp_sk(sk); 1089 bool first = true; 1090 1091 sk_stream_moderate_sndbuf(sk); 1092 mptcp_for_each_subflow(msk, subflow) { 1093 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 1094 1095 if (first) 1096 tcp_enter_memory_pressure(ssk); 1097 sk_stream_moderate_sndbuf(ssk); 1098 first = false; 1099 } 1100 } 1101 1102 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of 1103 * data 1104 */ 1105 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 1106 { 1107 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), 1108 pfrag, sk->sk_allocation))) 1109 return true; 1110 1111 mptcp_enter_memory_pressure(sk); 1112 return false; 1113 } 1114 1115 static struct mptcp_data_frag * 1116 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, 1117 int orig_offset) 1118 { 1119 int offset = ALIGN(orig_offset, sizeof(long)); 1120 struct mptcp_data_frag *dfrag; 1121 1122 dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); 1123 dfrag->data_len = 0; 1124 dfrag->data_seq = msk->write_seq; 1125 dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); 1126 dfrag->offset = offset + sizeof(struct mptcp_data_frag); 1127 
dfrag->already_sent = 0; 1128 dfrag->page = pfrag->page; 1129 1130 return dfrag; 1131 } 1132 1133 struct mptcp_sendmsg_info { 1134 int mss_now; 1135 int size_goal; 1136 u16 limit; 1137 u16 sent; 1138 unsigned int flags; 1139 bool data_lock_held; 1140 }; 1141 1142 static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq, 1143 int avail_size) 1144 { 1145 u64 window_end = mptcp_wnd_end(msk); 1146 1147 if (__mptcp_check_fallback(msk)) 1148 return avail_size; 1149 1150 if (!before64(data_seq + avail_size, window_end)) { 1151 u64 allowed_size = window_end - data_seq; 1152 1153 return min_t(unsigned int, allowed_size, avail_size); 1154 } 1155 1156 return avail_size; 1157 } 1158 1159 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp) 1160 { 1161 struct skb_ext *mpext = __skb_ext_alloc(gfp); 1162 1163 if (!mpext) 1164 return false; 1165 __skb_ext_set(skb, SKB_EXT_MPTCP, mpext); 1166 return true; 1167 } 1168 1169 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) 1170 { 1171 struct sk_buff *skb; 1172 1173 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); 1174 if (likely(skb)) { 1175 if (likely(__mptcp_add_ext(skb, gfp))) { 1176 skb_reserve(skb, MAX_TCP_HEADER); 1177 skb->ip_summed = CHECKSUM_PARTIAL; 1178 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 1179 return skb; 1180 } 1181 __kfree_skb(skb); 1182 } else { 1183 mptcp_enter_memory_pressure(sk); 1184 } 1185 return NULL; 1186 } 1187 1188 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) 1189 { 1190 struct sk_buff *skb; 1191 1192 skb = __mptcp_do_alloc_tx_skb(sk, gfp); 1193 if (!skb) 1194 return NULL; 1195 1196 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { 1197 tcp_skb_entail(ssk, skb); 1198 return skb; 1199 } 1200 kfree_skb(skb); 1201 return NULL; 1202 } 1203 1204 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) 1205 { 1206 gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; 1207 1208 if (unlikely(tcp_under_memory_pressure(sk))) { 1209 if (data_lock_held) 1210 __mptcp_mem_reclaim_partial(sk); 1211 else 1212 mptcp_mem_reclaim_partial(sk); 1213 } 1214 return __mptcp_alloc_tx_skb(sk, ssk, gfp); 1215 } 1216 1217 /* note: this always recompute the csum on the whole skb, even 1218 * if we just appended a single frag. 
More status info needed 1219 */ 1220 static void mptcp_update_data_checksum(struct sk_buff *skb, int added) 1221 { 1222 struct mptcp_ext *mpext = mptcp_get_ext(skb); 1223 __wsum csum = ~csum_unfold(mpext->csum); 1224 int offset = skb->len - added; 1225 1226 mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset)); 1227 } 1228 1229 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, 1230 struct mptcp_data_frag *dfrag, 1231 struct mptcp_sendmsg_info *info) 1232 { 1233 u64 data_seq = dfrag->data_seq + info->sent; 1234 int offset = dfrag->offset + info->sent; 1235 struct mptcp_sock *msk = mptcp_sk(sk); 1236 bool zero_window_probe = false; 1237 struct mptcp_ext *mpext = NULL; 1238 bool can_coalesce = false; 1239 bool reuse_skb = true; 1240 struct sk_buff *skb; 1241 size_t copy; 1242 int i; 1243 1244 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u", 1245 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); 1246 1247 if (WARN_ON_ONCE(info->sent > info->limit || 1248 info->limit > dfrag->data_len)) 1249 return 0; 1250 1251 /* compute send limit */ 1252 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); 1253 copy = info->size_goal; 1254 1255 skb = tcp_write_queue_tail(ssk); 1256 if (skb && copy > skb->len) { 1257 /* Limit the write to the size available in the 1258 * current skb, if any, so that we create at most a new skb. 1259 * Explicitly tells TCP internals to avoid collapsing on later 1260 * queue management operation, to avoid breaking the ext <-> 1261 * SSN association set here 1262 */ 1263 mpext = skb_ext_find(skb, SKB_EXT_MPTCP); 1264 if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) { 1265 TCP_SKB_CB(skb)->eor = 1; 1266 goto alloc_skb; 1267 } 1268 1269 i = skb_shinfo(skb)->nr_frags; 1270 can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); 1271 if (!can_coalesce && i >= sysctl_max_skb_frags) { 1272 tcp_mark_push(tcp_sk(ssk), skb); 1273 goto alloc_skb; 1274 } 1275 1276 copy -= skb->len; 1277 } else { 1278 alloc_skb: 1279 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); 1280 if (!skb) 1281 return -ENOMEM; 1282 1283 i = skb_shinfo(skb)->nr_frags; 1284 reuse_skb = false; 1285 mpext = skb_ext_find(skb, SKB_EXT_MPTCP); 1286 } 1287 1288 /* Zero window and all data acked? Probe. 
*/ 1289 copy = mptcp_check_allowed_size(msk, data_seq, copy); 1290 if (copy == 0) { 1291 u64 snd_una = READ_ONCE(msk->snd_una); 1292 1293 if (snd_una != msk->snd_nxt) { 1294 tcp_remove_empty_skb(ssk); 1295 return 0; 1296 } 1297 1298 zero_window_probe = true; 1299 data_seq = snd_una - 1; 1300 copy = 1; 1301 1302 /* all mptcp-level data is acked, no skbs should be present into the 1303 * ssk write queue 1304 */ 1305 WARN_ON_ONCE(reuse_skb); 1306 } 1307 1308 copy = min_t(size_t, copy, info->limit - info->sent); 1309 if (!sk_wmem_schedule(ssk, copy)) { 1310 tcp_remove_empty_skb(ssk); 1311 return -ENOMEM; 1312 } 1313 1314 if (can_coalesce) { 1315 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1316 } else { 1317 get_page(dfrag->page); 1318 skb_fill_page_desc(skb, i, dfrag->page, offset, copy); 1319 } 1320 1321 skb->len += copy; 1322 skb->data_len += copy; 1323 skb->truesize += copy; 1324 sk_wmem_queued_add(ssk, copy); 1325 sk_mem_charge(ssk, copy); 1326 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); 1327 TCP_SKB_CB(skb)->end_seq += copy; 1328 tcp_skb_pcount_set(skb, 0); 1329 1330 /* on skb reuse we just need to update the DSS len */ 1331 if (reuse_skb) { 1332 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1333 mpext->data_len += copy; 1334 WARN_ON_ONCE(zero_window_probe); 1335 goto out; 1336 } 1337 1338 memset(mpext, 0, sizeof(*mpext)); 1339 mpext->data_seq = data_seq; 1340 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; 1341 mpext->data_len = copy; 1342 mpext->use_map = 1; 1343 mpext->dsn64 = 1; 1344 1345 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d", 1346 mpext->data_seq, mpext->subflow_seq, mpext->data_len, 1347 mpext->dsn64); 1348 1349 if (zero_window_probe) { 1350 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1351 mpext->frozen = 1; 1352 if (READ_ONCE(msk->csum_enabled)) 1353 mptcp_update_data_checksum(skb, copy); 1354 tcp_push_pending_frames(ssk); 1355 return 0; 1356 } 1357 out: 1358 if (READ_ONCE(msk->csum_enabled)) 1359 mptcp_update_data_checksum(skb, copy); 1360 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1361 return copy; 1362 } 1363 1364 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ 1365 sizeof(struct tcphdr) - \ 1366 MAX_TCP_OPTION_SPACE - \ 1367 sizeof(struct ipv6hdr) - \ 1368 sizeof(struct frag_hdr)) 1369 1370 struct subflow_send_info { 1371 struct sock *ssk; 1372 u64 ratio; 1373 }; 1374 1375 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) 1376 { 1377 if (!subflow->stale) 1378 return; 1379 1380 subflow->stale = 0; 1381 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); 1382 } 1383 1384 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) 1385 { 1386 if (unlikely(subflow->stale)) { 1387 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); 1388 1389 if (subflow->stale_rcv_tstamp == rcv_tstamp) 1390 return false; 1391 1392 mptcp_subflow_set_active(subflow); 1393 } 1394 return __mptcp_subflow_active(subflow); 1395 } 1396 1397 /* implement the mptcp packet scheduler; 1398 * returns the subflow that will transmit the next DSS 1399 * additionally updates the rtx timeout 1400 */ 1401 static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) 1402 { 1403 struct subflow_send_info send_info[2]; 1404 struct mptcp_subflow_context *subflow; 1405 struct sock *sk = (struct sock *)msk; 1406 int i, nr_active = 0; 1407 struct sock *ssk; 1408 long tout = 0; 1409 u64 ratio; 1410 u32 pace; 1411 1412 sock_owned_by_me(sk); 1413 1414 if 
(__mptcp_check_fallback(msk)) { 1415 if (!msk->first) 1416 return NULL; 1417 return sk_stream_memory_free(msk->first) ? msk->first : NULL; 1418 } 1419 1420 /* re-use last subflow, if the burst allow that */ 1421 if (msk->last_snd && msk->snd_burst > 0 && 1422 sk_stream_memory_free(msk->last_snd) && 1423 mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) { 1424 mptcp_set_timeout(sk); 1425 return msk->last_snd; 1426 } 1427 1428 /* pick the subflow with the lower wmem/wspace ratio */ 1429 for (i = 0; i < 2; ++i) { 1430 send_info[i].ssk = NULL; 1431 send_info[i].ratio = -1; 1432 } 1433 mptcp_for_each_subflow(msk, subflow) { 1434 trace_mptcp_subflow_get_send(subflow); 1435 ssk = mptcp_subflow_tcp_sock(subflow); 1436 if (!mptcp_subflow_active(subflow)) 1437 continue; 1438 1439 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 1440 nr_active += !subflow->backup; 1441 if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd) 1442 continue; 1443 1444 pace = READ_ONCE(ssk->sk_pacing_rate); 1445 if (!pace) 1446 continue; 1447 1448 ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, 1449 pace); 1450 if (ratio < send_info[subflow->backup].ratio) { 1451 send_info[subflow->backup].ssk = ssk; 1452 send_info[subflow->backup].ratio = ratio; 1453 } 1454 } 1455 __mptcp_set_timeout(sk, tout); 1456 1457 /* pick the best backup if no other subflow is active */ 1458 if (!nr_active) 1459 send_info[0].ssk = send_info[1].ssk; 1460 1461 if (send_info[0].ssk) { 1462 msk->last_snd = send_info[0].ssk; 1463 msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE, 1464 tcp_sk(msk->last_snd)->snd_wnd); 1465 return msk->last_snd; 1466 } 1467 1468 return NULL; 1469 } 1470 1471 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) 1472 { 1473 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); 1474 release_sock(ssk); 1475 } 1476 1477 static void mptcp_update_post_push(struct mptcp_sock *msk, 1478 struct mptcp_data_frag *dfrag, 1479 u32 sent) 1480 { 1481 u64 snd_nxt_new = dfrag->data_seq; 1482 1483 dfrag->already_sent += sent; 1484 1485 msk->snd_burst -= sent; 1486 1487 snd_nxt_new += dfrag->already_sent; 1488 1489 /* snd_nxt_new can be smaller than snd_nxt in case mptcp 1490 * is recovering after a failover. In that event, this re-sends 1491 * old segments. 1492 * 1493 * Thus compute snd_nxt_new candidate based on 1494 * the dfrag->data_seq that was sent and the data 1495 * that has been handed to the subflow for transmission 1496 * and skip update in case it was old dfrag. 1497 */ 1498 if (likely(after64(snd_nxt_new, msk->snd_nxt))) 1499 msk->snd_nxt = snd_nxt_new; 1500 } 1501 1502 static void mptcp_check_and_set_pending(struct sock *sk) 1503 { 1504 if (mptcp_send_head(sk) && 1505 !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) 1506 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); 1507 } 1508 1509 void __mptcp_push_pending(struct sock *sk, unsigned int flags) 1510 { 1511 struct sock *prev_ssk = NULL, *ssk = NULL; 1512 struct mptcp_sock *msk = mptcp_sk(sk); 1513 struct mptcp_sendmsg_info info = { 1514 .flags = flags, 1515 }; 1516 struct mptcp_data_frag *dfrag; 1517 int len, copied = 0; 1518 1519 while ((dfrag = mptcp_send_head(sk))) { 1520 info.sent = dfrag->already_sent; 1521 info.limit = dfrag->data_len; 1522 len = dfrag->data_len - dfrag->already_sent; 1523 while (len > 0) { 1524 int ret = 0; 1525 1526 prev_ssk = ssk; 1527 mptcp_flush_join_list(msk); 1528 ssk = mptcp_subflow_get_send(msk); 1529 1530 /* First check. 
			 * If the ssk has changed since
			 * the last round, release prev_ssk
			 */
			if (ssk != prev_ssk && prev_ssk)
				mptcp_push_release(prev_ssk, &info);
			if (!ssk)
				goto out;

			/* Need to lock the new subflow only if different
			 * from the previous one, otherwise we are still
			 * holding the relevant lock
			 */
			if (ssk != prev_ssk)
				lock_sock(ssk);

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0) {
				mptcp_push_release(ssk, &info);
				goto out;
			}

			info.sent += ret;
			copied += ret;
			len -= ret;

			mptcp_update_post_push(msk, dfrag, ret);
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

	/* at this point we held the socket lock for the last subflow we used */
	if (ssk)
		mptcp_push_release(ssk, &info);

out:
	/* ensure the rtx timer is running */
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);
	if (copied)
		__mptcp_check_send_data_fin(sk);
}

static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {
		.data_lock_held = true,
	};
	struct mptcp_data_frag *dfrag;
	struct sock *xmit_ssk;
	int len, copied = 0;
	bool first = true;

	info.flags = 0;
	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

			/* the caller already invoked the packet scheduler,
			 * check for a different subflow usage only after
			 * spooling the first chunk of data
			 */
			xmit_ssk = first ?
ssk : mptcp_subflow_get_send(mptcp_sk(sk)); 1596 if (!xmit_ssk) 1597 goto out; 1598 if (xmit_ssk != ssk) { 1599 mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), 1600 MPTCP_DELEGATE_SEND); 1601 goto out; 1602 } 1603 1604 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); 1605 if (ret <= 0) 1606 goto out; 1607 1608 info.sent += ret; 1609 copied += ret; 1610 len -= ret; 1611 first = false; 1612 1613 mptcp_update_post_push(msk, dfrag, ret); 1614 } 1615 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1616 } 1617 1618 out: 1619 /* __mptcp_alloc_tx_skb could have released some wmem and we are 1620 * not going to flush it via release_sock() 1621 */ 1622 if (copied) { 1623 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, 1624 info.size_goal); 1625 if (!mptcp_timer_pending(sk)) 1626 mptcp_reset_timer(sk); 1627 1628 if (msk->snd_data_fin_enable && 1629 msk->snd_nxt + 1 == msk->write_seq) 1630 mptcp_schedule_work(sk); 1631 } 1632 } 1633 1634 static void mptcp_set_nospace(struct sock *sk) 1635 { 1636 /* enable autotune */ 1637 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1638 1639 /* will be cleared on avail space */ 1640 set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags); 1641 } 1642 1643 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1644 { 1645 struct mptcp_sock *msk = mptcp_sk(sk); 1646 struct page_frag *pfrag; 1647 size_t copied = 0; 1648 int ret = 0; 1649 long timeo; 1650 1651 /* we don't support FASTOPEN yet */ 1652 if (msg->msg_flags & MSG_FASTOPEN) 1653 return -EOPNOTSUPP; 1654 1655 /* silently ignore everything else */ 1656 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL; 1657 1658 lock_sock(sk); 1659 1660 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1661 1662 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { 1663 ret = sk_stream_wait_connect(sk, &timeo); 1664 if (ret) 1665 goto out; 1666 } 1667 1668 pfrag = sk_page_frag(sk); 1669 1670 while (msg_data_left(msg)) { 1671 int total_ts, frag_truesize = 0; 1672 struct mptcp_data_frag *dfrag; 1673 bool dfrag_collapsed; 1674 size_t psize, offset; 1675 1676 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { 1677 ret = -EPIPE; 1678 goto out; 1679 } 1680 1681 /* reuse tail pfrag, if possible, or carve a new one from the 1682 * page allocator 1683 */ 1684 dfrag = mptcp_pending_tail(sk); 1685 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); 1686 if (!dfrag_collapsed) { 1687 if (!sk_stream_memory_free(sk)) 1688 goto wait_for_memory; 1689 1690 if (!mptcp_page_frag_refill(sk, pfrag)) 1691 goto wait_for_memory; 1692 1693 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); 1694 frag_truesize = dfrag->overhead; 1695 } 1696 1697 /* we do not bound vs wspace, to allow a single packet. 
1698 * memory accounting will prevent execessive memory usage 1699 * anyway 1700 */ 1701 offset = dfrag->offset + dfrag->data_len; 1702 psize = pfrag->size - offset; 1703 psize = min_t(size_t, psize, msg_data_left(msg)); 1704 total_ts = psize + frag_truesize; 1705 1706 if (!sk_wmem_schedule(sk, total_ts)) 1707 goto wait_for_memory; 1708 1709 if (copy_page_from_iter(dfrag->page, offset, psize, 1710 &msg->msg_iter) != psize) { 1711 ret = -EFAULT; 1712 goto out; 1713 } 1714 1715 /* data successfully copied into the write queue */ 1716 sk->sk_forward_alloc -= total_ts; 1717 copied += psize; 1718 dfrag->data_len += psize; 1719 frag_truesize += psize; 1720 pfrag->offset += frag_truesize; 1721 WRITE_ONCE(msk->write_seq, msk->write_seq + psize); 1722 1723 /* charge data on mptcp pending queue to the msk socket 1724 * Note: we charge such data both to sk and ssk 1725 */ 1726 sk_wmem_queued_add(sk, frag_truesize); 1727 if (!dfrag_collapsed) { 1728 get_page(dfrag->page); 1729 list_add_tail(&dfrag->list, &msk->rtx_queue); 1730 if (!msk->first_pending) 1731 WRITE_ONCE(msk->first_pending, dfrag); 1732 } 1733 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk, 1734 dfrag->data_seq, dfrag->data_len, dfrag->already_sent, 1735 !dfrag_collapsed); 1736 1737 continue; 1738 1739 wait_for_memory: 1740 mptcp_set_nospace(sk); 1741 __mptcp_push_pending(sk, msg->msg_flags); 1742 ret = sk_stream_wait_memory(sk, &timeo); 1743 if (ret) 1744 goto out; 1745 } 1746 1747 if (copied) 1748 __mptcp_push_pending(sk, msg->msg_flags); 1749 1750 out: 1751 release_sock(sk); 1752 return copied ? : ret; 1753 } 1754 1755 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, 1756 struct msghdr *msg, 1757 size_t len, int flags, 1758 struct scm_timestamping_internal *tss, 1759 int *cmsg_flags) 1760 { 1761 struct sk_buff *skb, *tmp; 1762 int copied = 0; 1763 1764 skb_queue_walk_safe(&msk->receive_queue, skb, tmp) { 1765 u32 offset = MPTCP_SKB_CB(skb)->offset; 1766 u32 data_len = skb->len - offset; 1767 u32 count = min_t(size_t, len - copied, data_len); 1768 int err; 1769 1770 if (!(flags & MSG_TRUNC)) { 1771 err = skb_copy_datagram_msg(skb, offset, msg, count); 1772 if (unlikely(err < 0)) { 1773 if (!copied) 1774 return err; 1775 break; 1776 } 1777 } 1778 1779 if (MPTCP_SKB_CB(skb)->has_rxtstamp) { 1780 tcp_update_recv_tstamps(skb, tss); 1781 *cmsg_flags |= MPTCP_CMSG_TS; 1782 } 1783 1784 copied += count; 1785 1786 if (count < data_len) { 1787 if (!(flags & MSG_PEEK)) 1788 MPTCP_SKB_CB(skb)->offset += count; 1789 break; 1790 } 1791 1792 if (!(flags & MSG_PEEK)) { 1793 /* we will bulk release the skb memory later */ 1794 skb->destructor = NULL; 1795 WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); 1796 __skb_unlink(skb, &msk->receive_queue); 1797 __kfree_skb(skb); 1798 } 1799 1800 if (copied >= len) 1801 break; 1802 } 1803 1804 return copied; 1805 } 1806 1807 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. 1808 * 1809 * Only difference: Use highest rtt estimate of the subflows in use. 
1810 */ 1811 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) 1812 { 1813 struct mptcp_subflow_context *subflow; 1814 struct sock *sk = (struct sock *)msk; 1815 u32 time, advmss = 1; 1816 u64 rtt_us, mstamp; 1817 1818 sock_owned_by_me(sk); 1819 1820 if (copied <= 0) 1821 return; 1822 1823 msk->rcvq_space.copied += copied; 1824 1825 mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); 1826 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); 1827 1828 rtt_us = msk->rcvq_space.rtt_us; 1829 if (rtt_us && time < (rtt_us >> 3)) 1830 return; 1831 1832 rtt_us = 0; 1833 mptcp_for_each_subflow(msk, subflow) { 1834 const struct tcp_sock *tp; 1835 u64 sf_rtt_us; 1836 u32 sf_advmss; 1837 1838 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); 1839 1840 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); 1841 sf_advmss = READ_ONCE(tp->advmss); 1842 1843 rtt_us = max(sf_rtt_us, rtt_us); 1844 advmss = max(sf_advmss, advmss); 1845 } 1846 1847 msk->rcvq_space.rtt_us = rtt_us; 1848 if (time < (rtt_us >> 3) || rtt_us == 0) 1849 return; 1850 1851 if (msk->rcvq_space.copied <= msk->rcvq_space.space) 1852 goto new_measure; 1853 1854 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && 1855 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 1856 int rcvmem, rcvbuf; 1857 u64 rcvwin, grow; 1858 1859 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; 1860 1861 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); 1862 1863 do_div(grow, msk->rcvq_space.space); 1864 rcvwin += (grow << 1); 1865 1866 rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER); 1867 while (tcp_win_from_space(sk, rcvmem) < advmss) 1868 rcvmem += 128; 1869 1870 do_div(rcvwin, advmss); 1871 rcvbuf = min_t(u64, rcvwin * rcvmem, 1872 sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); 1873 1874 if (rcvbuf > sk->sk_rcvbuf) { 1875 u32 window_clamp; 1876 1877 window_clamp = tcp_win_from_space(sk, rcvbuf); 1878 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 1879 1880 /* Make subflows follow along. If we do not do this, we 1881 * get drops at subflow level if skbs can't be moved to 1882 * the mptcp rx queue fast enough (announced rcv_win can 1883 * exceed ssk->sk_rcvbuf). 
1884 */ 1885 mptcp_for_each_subflow(msk, subflow) { 1886 struct sock *ssk; 1887 bool slow; 1888 1889 ssk = mptcp_subflow_tcp_sock(subflow); 1890 slow = lock_sock_fast(ssk); 1891 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); 1892 tcp_sk(ssk)->window_clamp = window_clamp; 1893 tcp_cleanup_rbuf(ssk, 1); 1894 unlock_sock_fast(ssk, slow); 1895 } 1896 } 1897 } 1898 1899 msk->rcvq_space.space = msk->rcvq_space.copied; 1900 new_measure: 1901 msk->rcvq_space.copied = 0; 1902 msk->rcvq_space.time = mstamp; 1903 } 1904 1905 static void __mptcp_update_rmem(struct sock *sk) 1906 { 1907 struct mptcp_sock *msk = mptcp_sk(sk); 1908 1909 if (!msk->rmem_released) 1910 return; 1911 1912 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); 1913 mptcp_rmem_uncharge(sk, msk->rmem_released); 1914 WRITE_ONCE(msk->rmem_released, 0); 1915 } 1916 1917 static void __mptcp_splice_receive_queue(struct sock *sk) 1918 { 1919 struct mptcp_sock *msk = mptcp_sk(sk); 1920 1921 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); 1922 } 1923 1924 static bool __mptcp_move_skbs(struct mptcp_sock *msk) 1925 { 1926 struct sock *sk = (struct sock *)msk; 1927 unsigned int moved = 0; 1928 bool ret, done; 1929 1930 mptcp_flush_join_list(msk); 1931 do { 1932 struct sock *ssk = mptcp_subflow_recv_lookup(msk); 1933 bool slowpath; 1934 1935 /* we can have data pending in the subflows only if the msk 1936 * receive buffer was full at subflow_data_ready() time, 1937 * that is an unlikely slow path. 1938 */ 1939 if (likely(!ssk)) 1940 break; 1941 1942 slowpath = lock_sock_fast(ssk); 1943 mptcp_data_lock(sk); 1944 __mptcp_update_rmem(sk); 1945 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); 1946 mptcp_data_unlock(sk); 1947 1948 if (unlikely(ssk->sk_err)) 1949 __mptcp_error_report(sk); 1950 unlock_sock_fast(ssk, slowpath); 1951 } while (!done); 1952 1953 /* acquire the data lock only if some input data is pending */ 1954 ret = moved > 0; 1955 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) || 1956 !skb_queue_empty_lockless(&sk->sk_receive_queue)) { 1957 mptcp_data_lock(sk); 1958 __mptcp_update_rmem(sk); 1959 ret |= __mptcp_ofo_queue(msk); 1960 __mptcp_splice_receive_queue(sk); 1961 mptcp_data_unlock(sk); 1962 } 1963 if (ret) 1964 mptcp_check_data_fin((struct sock *)msk); 1965 return !skb_queue_empty(&msk->receive_queue); 1966 } 1967 1968 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 1969 int nonblock, int flags, int *addr_len) 1970 { 1971 struct mptcp_sock *msk = mptcp_sk(sk); 1972 struct scm_timestamping_internal tss; 1973 int copied = 0, cmsg_flags = 0; 1974 int target; 1975 long timeo; 1976 1977 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */ 1978 if (unlikely(flags & MSG_ERRQUEUE)) 1979 return inet_recv_error(sk, msg, len, addr_len); 1980 1981 lock_sock(sk); 1982 if (unlikely(sk->sk_state == TCP_LISTEN)) { 1983 copied = -ENOTCONN; 1984 goto out_err; 1985 } 1986 1987 timeo = sock_rcvtimeo(sk, nonblock); 1988 1989 len = min_t(size_t, len, INT_MAX); 1990 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1991 1992 while (copied < len) { 1993 int bytes_read; 1994 1995 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); 1996 if (unlikely(bytes_read < 0)) { 1997 if (!copied) 1998 copied = bytes_read; 1999 goto out_err; 2000 } 2001 2002 copied += bytes_read; 2003 2004 /* be sure to advertise window change */ 2005 mptcp_cleanup_rbuf(msk); 2006 2007 if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk)) 2008 continue; 2009 2010 /* only the 
master socket status is relevant here. The exit 2011 * conditions mirror closely tcp_recvmsg() 2012 */ 2013 if (copied >= target) 2014 break; 2015 2016 if (copied) { 2017 if (sk->sk_err || 2018 sk->sk_state == TCP_CLOSE || 2019 (sk->sk_shutdown & RCV_SHUTDOWN) || 2020 !timeo || 2021 signal_pending(current)) 2022 break; 2023 } else { 2024 if (sk->sk_err) { 2025 copied = sock_error(sk); 2026 break; 2027 } 2028 2029 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) 2030 mptcp_check_for_eof(msk); 2031 2032 if (sk->sk_shutdown & RCV_SHUTDOWN) { 2033 /* race breaker: the shutdown could be after the 2034 * previous receive queue check 2035 */ 2036 if (__mptcp_move_skbs(msk)) 2037 continue; 2038 break; 2039 } 2040 2041 if (sk->sk_state == TCP_CLOSE) { 2042 copied = -ENOTCONN; 2043 break; 2044 } 2045 2046 if (!timeo) { 2047 copied = -EAGAIN; 2048 break; 2049 } 2050 2051 if (signal_pending(current)) { 2052 copied = sock_intr_errno(timeo); 2053 break; 2054 } 2055 } 2056 2057 pr_debug("block timeout %ld", timeo); 2058 sk_wait_data(sk, &timeo, NULL); 2059 } 2060 2061 out_err: 2062 if (cmsg_flags && copied >= 0) { 2063 if (cmsg_flags & MPTCP_CMSG_TS) 2064 tcp_recv_timestamp(msg, sk, &tss); 2065 } 2066 2067 pr_debug("msk=%p rx queue empty=%d:%d copied=%d", 2068 msk, skb_queue_empty_lockless(&sk->sk_receive_queue), 2069 skb_queue_empty(&msk->receive_queue), copied); 2070 if (!(flags & MSG_PEEK)) 2071 mptcp_rcv_space_adjust(msk, copied); 2072 2073 release_sock(sk); 2074 return copied; 2075 } 2076 2077 static void mptcp_retransmit_timer(struct timer_list *t) 2078 { 2079 struct inet_connection_sock *icsk = from_timer(icsk, t, 2080 icsk_retransmit_timer); 2081 struct sock *sk = &icsk->icsk_inet.sk; 2082 struct mptcp_sock *msk = mptcp_sk(sk); 2083 2084 bh_lock_sock(sk); 2085 if (!sock_owned_by_user(sk)) { 2086 /* we need a process context to retransmit */ 2087 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) 2088 mptcp_schedule_work(sk); 2089 } else { 2090 /* delegate our work to tcp_release_cb() */ 2091 set_bit(MPTCP_RETRANSMIT, &msk->flags); 2092 } 2093 bh_unlock_sock(sk); 2094 sock_put(sk); 2095 } 2096 2097 static void mptcp_timeout_timer(struct timer_list *t) 2098 { 2099 struct sock *sk = from_timer(sk, t, sk_timer); 2100 2101 mptcp_schedule_work(sk); 2102 sock_put(sk); 2103 } 2104 2105 /* Find an idle subflow. Return NULL if there is unacked data at tcp 2106 * level. 2107 * 2108 * A backup subflow is returned only if that is the only kind available. 2109 */ 2110 static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) 2111 { 2112 struct sock *backup = NULL, *pick = NULL; 2113 struct mptcp_subflow_context *subflow; 2114 int min_stale_count = INT_MAX; 2115 2116 sock_owned_by_me((const struct sock *)msk); 2117 2118 if (__mptcp_check_fallback(msk)) 2119 return NULL; 2120 2121 mptcp_for_each_subflow(msk, subflow) { 2122 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2123 2124 if (!__mptcp_subflow_active(subflow)) 2125 continue; 2126 2127 /* still data outstanding at TCP level? skip this */ 2128 if (!tcp_rtx_and_write_queues_empty(ssk)) { 2129 mptcp_pm_subflow_chk_stale(msk, ssk); 2130 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); 2131 continue; 2132 } 2133 2134 if (subflow->backup) { 2135 if (!backup) 2136 backup = ssk; 2137 continue; 2138 } 2139 2140 if (!pick) 2141 pick = ssk; 2142 } 2143 2144 if (pick) 2145 return pick; 2146 2147 /* use backup only if there are no progresses anywhere */ 2148 return min_stale_count > 1 ? 
backup : NULL; 2149 } 2150 2151 static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk) 2152 { 2153 if (msk->subflow) { 2154 iput(SOCK_INODE(msk->subflow)); 2155 msk->subflow = NULL; 2156 } 2157 } 2158 2159 bool __mptcp_retransmit_pending_data(struct sock *sk) 2160 { 2161 struct mptcp_data_frag *cur, *rtx_head; 2162 struct mptcp_sock *msk = mptcp_sk(sk); 2163 2164 if (__mptcp_check_fallback(mptcp_sk(sk))) 2165 return false; 2166 2167 if (tcp_rtx_and_write_queues_empty(sk)) 2168 return false; 2169 2170 /* the closing socket has some data untransmitted and/or unacked: 2171 * some data in the mptcp rtx queue has not really been transmitted yet. 2172 * keep it simple and re-inject the whole mptcp-level rtx queue 2173 */ 2174 mptcp_data_lock(sk); 2175 __mptcp_clean_una_wakeup(sk); 2176 rtx_head = mptcp_rtx_head(sk); 2177 if (!rtx_head) { 2178 mptcp_data_unlock(sk); 2179 return false; 2180 } 2181 2182 msk->recovery_snd_nxt = msk->snd_nxt; 2183 msk->recovery = true; 2184 mptcp_data_unlock(sk); 2185 2186 msk->first_pending = rtx_head; 2187 msk->snd_burst = 0; 2188 2189 /* be sure to clear the "sent status" on all re-injected fragments */ 2190 list_for_each_entry(cur, &msk->rtx_queue, list) { 2191 if (!cur->already_sent) 2192 break; 2193 cur->already_sent = 0; 2194 } 2195 2196 return true; 2197 } 2198 2199 /* subflow sockets can be either outgoing (connect) or incoming 2200 * (accept). 2201 * 2202 * Outgoing subflows use in-kernel sockets. 2203 * Incoming subflows do not have their own 'struct socket' allocated, 2204 * so we need to use tcp_close() after detaching them from the mptcp 2205 * parent socket. 2206 */ 2207 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2208 struct mptcp_subflow_context *subflow) 2209 { 2210 struct mptcp_sock *msk = mptcp_sk(sk); 2211 bool need_push; 2212 2213 list_del(&subflow->node); 2214 2215 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 2216 2217 /* if we are invoked by the msk cleanup code, the subflow is 2218 * already orphaned 2219 */ 2220 if (ssk->sk_socket) 2221 sock_orphan(ssk); 2222 2223 need_push = __mptcp_retransmit_pending_data(sk); 2224 subflow->disposable = 1; 2225 2226 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops; 2227 * the ssk has already been destroyed, we just need to release the 2228 * reference owned by msk 2229 */ 2230 if (!inet_csk(ssk)->icsk_ulp_ops) { 2231 kfree_rcu(subflow, rcu); 2232 } else { 2233 /* otherwise tcp will dispose of the ssk and subflow ctx */ 2234 __tcp_close(ssk, 0); 2235 2236 /* close acquired an extra ref */ 2237 __sock_put(ssk); 2238 } 2239 release_sock(ssk); 2240 2241 sock_put(ssk); 2242 2243 if (ssk == msk->last_snd) 2244 msk->last_snd = NULL; 2245 2246 if (ssk == msk->first) 2247 msk->first = NULL; 2248 2249 if (msk->subflow && ssk == msk->subflow->sk) 2250 mptcp_dispose_initial_subflow(msk); 2251 2252 if (need_push) 2253 __mptcp_push_pending(sk, 0); 2254 } 2255 2256 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2257 struct mptcp_subflow_context *subflow) 2258 { 2259 if (sk->sk_state == TCP_ESTABLISHED) 2260 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); 2261 __mptcp_close_ssk(sk, ssk, subflow); 2262 } 2263 2264 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) 2265 { 2266 return 0; 2267 } 2268 2269 static void __mptcp_close_subflow(struct mptcp_sock *msk) 2270 { 2271 struct mptcp_subflow_context *subflow, *tmp; 2272 2273 might_sleep(); 2274 2275 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { 2276 struct sock *ssk =
mptcp_subflow_tcp_sock(subflow); 2277 2278 if (inet_sk_state_load(ssk) != TCP_CLOSE) 2279 continue; 2280 2281 /* 'subflow_data_ready' will re-sched once rx queue is empty */ 2282 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) 2283 continue; 2284 2285 mptcp_close_ssk((struct sock *)msk, ssk, subflow); 2286 } 2287 } 2288 2289 static bool mptcp_check_close_timeout(const struct sock *sk) 2290 { 2291 s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp; 2292 struct mptcp_subflow_context *subflow; 2293 2294 if (delta >= TCP_TIMEWAIT_LEN) 2295 return true; 2296 2297 /* if all subflows are in closed status don't bother with additional 2298 * timeout 2299 */ 2300 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { 2301 if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) != 2302 TCP_CLOSE) 2303 return false; 2304 } 2305 return true; 2306 } 2307 2308 static void mptcp_check_fastclose(struct mptcp_sock *msk) 2309 { 2310 struct mptcp_subflow_context *subflow, *tmp; 2311 struct sock *sk = &msk->sk.icsk_inet.sk; 2312 2313 if (likely(!READ_ONCE(msk->rcv_fastclose))) 2314 return; 2315 2316 mptcp_token_destroy(msk); 2317 2318 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { 2319 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 2320 bool slow; 2321 2322 slow = lock_sock_fast(tcp_sk); 2323 if (tcp_sk->sk_state != TCP_CLOSE) { 2324 tcp_send_active_reset(tcp_sk, GFP_ATOMIC); 2325 tcp_set_state(tcp_sk, TCP_CLOSE); 2326 } 2327 unlock_sock_fast(tcp_sk, slow); 2328 } 2329 2330 inet_sk_state_store(sk, TCP_CLOSE); 2331 sk->sk_shutdown = SHUTDOWN_MASK; 2332 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 2333 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); 2334 2335 mptcp_close_wake_up(sk); 2336 } 2337 2338 static void __mptcp_retrans(struct sock *sk) 2339 { 2340 struct mptcp_sock *msk = mptcp_sk(sk); 2341 struct mptcp_sendmsg_info info = {}; 2342 struct mptcp_data_frag *dfrag; 2343 size_t copied = 0; 2344 struct sock *ssk; 2345 int ret; 2346 2347 mptcp_clean_una_wakeup(sk); 2348 2349 /* first check ssk: need to kick "stale" logic */ 2350 ssk = mptcp_subflow_get_retrans(msk); 2351 dfrag = mptcp_rtx_head(sk); 2352 if (!dfrag) { 2353 if (mptcp_data_fin_enabled(msk)) { 2354 struct inet_connection_sock *icsk = inet_csk(sk); 2355 2356 icsk->icsk_retransmits++; 2357 mptcp_set_datafin_timeout(sk); 2358 mptcp_send_ack(msk); 2359 2360 goto reset_timer; 2361 } 2362 2363 if (!mptcp_send_head(sk)) 2364 return; 2365 2366 goto reset_timer; 2367 } 2368 2369 if (!ssk) 2370 goto reset_timer; 2371 2372 lock_sock(ssk); 2373 2374 /* limit retransmission to the bytes already sent on some subflows */ 2375 info.sent = 0; 2376 info.limit = READ_ONCE(msk->csum_enabled) ? 
dfrag->data_len : dfrag->already_sent; 2377 while (info.sent < info.limit) { 2378 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); 2379 if (ret <= 0) 2380 break; 2381 2382 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); 2383 copied += ret; 2384 info.sent += ret; 2385 } 2386 if (copied) { 2387 dfrag->already_sent = max(dfrag->already_sent, info.sent); 2388 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, 2389 info.size_goal); 2390 } 2391 2392 release_sock(ssk); 2393 2394 reset_timer: 2395 mptcp_check_and_set_pending(sk); 2396 2397 if (!mptcp_timer_pending(sk)) 2398 mptcp_reset_timer(sk); 2399 } 2400 2401 static void mptcp_worker(struct work_struct *work) 2402 { 2403 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); 2404 struct sock *sk = &msk->sk.icsk_inet.sk; 2405 int state; 2406 2407 lock_sock(sk); 2408 state = sk->sk_state; 2409 if (unlikely(state == TCP_CLOSE)) 2410 goto unlock; 2411 2412 mptcp_check_data_fin_ack(sk); 2413 mptcp_flush_join_list(msk); 2414 2415 mptcp_check_fastclose(msk); 2416 2417 if (msk->pm.status) 2418 mptcp_pm_nl_work(msk); 2419 2420 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) 2421 mptcp_check_for_eof(msk); 2422 2423 __mptcp_check_send_data_fin(sk); 2424 mptcp_check_data_fin(sk); 2425 2426 /* There is no point in keeping around an orphaned sk timedout or 2427 * closed, but we need the msk around to reply to incoming DATA_FIN, 2428 * even if it is orphaned and in FIN_WAIT2 state 2429 */ 2430 if (sock_flag(sk, SOCK_DEAD) && 2431 (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) { 2432 inet_sk_state_store(sk, TCP_CLOSE); 2433 __mptcp_destroy_sock(sk); 2434 goto unlock; 2435 } 2436 2437 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 2438 __mptcp_close_subflow(msk); 2439 2440 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) 2441 __mptcp_retrans(sk); 2442 2443 unlock: 2444 release_sock(sk); 2445 sock_put(sk); 2446 } 2447 2448 static int __mptcp_init_sock(struct sock *sk) 2449 { 2450 struct mptcp_sock *msk = mptcp_sk(sk); 2451 2452 spin_lock_init(&msk->join_list_lock); 2453 2454 INIT_LIST_HEAD(&msk->conn_list); 2455 INIT_LIST_HEAD(&msk->join_list); 2456 INIT_LIST_HEAD(&msk->rtx_queue); 2457 INIT_WORK(&msk->work, mptcp_worker); 2458 __skb_queue_head_init(&msk->receive_queue); 2459 msk->out_of_order_queue = RB_ROOT; 2460 msk->first_pending = NULL; 2461 msk->rmem_fwd_alloc = 0; 2462 WRITE_ONCE(msk->rmem_released, 0); 2463 msk->timer_ival = TCP_RTO_MIN; 2464 2465 msk->first = NULL; 2466 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; 2467 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); 2468 msk->recovery = false; 2469 2470 mptcp_pm_data_init(msk); 2471 2472 /* re-use the csk retrans timer for MPTCP-level retrans */ 2473 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); 2474 timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0); 2475 2476 return 0; 2477 } 2478 2479 static int mptcp_init_sock(struct sock *sk) 2480 { 2481 struct inet_connection_sock *icsk = inet_csk(sk); 2482 struct net *net = sock_net(sk); 2483 int ret; 2484 2485 ret = __mptcp_init_sock(sk); 2486 if (ret) 2487 return ret; 2488 2489 if (!mptcp_is_enabled(net)) 2490 return -ENOPROTOOPT; 2491 2492 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) 2493 return -ENOMEM; 2494 2495 ret = __mptcp_socket_create(mptcp_sk(sk)); 2496 if (ret) 2497 return ret; 2498 2499 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will 2500 * propagate the correct value 2501 */ 2502 
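/* note: the msk never runs a TCP congestion control instance of its own; the default CA ops are attached below just long enough to learn their name (copied into msk->ca_name, presumably for later application to the subflows) and are then released again */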
tcp_assign_congestion_control(sk); 2503 strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name); 2504 2505 /* no need to keep a reference to the ops, the name will suffice */ 2506 tcp_cleanup_congestion_control(sk); 2507 icsk->icsk_ca_ops = NULL; 2508 2509 sk_sockets_allocated_inc(sk); 2510 sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; 2511 sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; 2512 2513 return 0; 2514 } 2515 2516 static void __mptcp_clear_xmit(struct sock *sk) 2517 { 2518 struct mptcp_sock *msk = mptcp_sk(sk); 2519 struct mptcp_data_frag *dtmp, *dfrag; 2520 2521 WRITE_ONCE(msk->first_pending, NULL); 2522 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) 2523 dfrag_clear(sk, dfrag); 2524 } 2525 2526 static void mptcp_cancel_work(struct sock *sk) 2527 { 2528 struct mptcp_sock *msk = mptcp_sk(sk); 2529 2530 if (cancel_work_sync(&msk->work)) 2531 __sock_put(sk); 2532 } 2533 2534 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) 2535 { 2536 lock_sock(ssk); 2537 2538 switch (ssk->sk_state) { 2539 case TCP_LISTEN: 2540 if (!(how & RCV_SHUTDOWN)) 2541 break; 2542 fallthrough; 2543 case TCP_SYN_SENT: 2544 tcp_disconnect(ssk, O_NONBLOCK); 2545 break; 2546 default: 2547 if (__mptcp_check_fallback(mptcp_sk(sk))) { 2548 pr_debug("Fallback"); 2549 ssk->sk_shutdown |= how; 2550 tcp_shutdown(ssk, how); 2551 } else { 2552 pr_debug("Sending DATA_FIN on subflow %p", ssk); 2553 tcp_send_ack(ssk); 2554 if (!mptcp_timer_pending(sk)) 2555 mptcp_reset_timer(sk); 2556 } 2557 break; 2558 } 2559 2560 release_sock(ssk); 2561 } 2562 2563 static const unsigned char new_state[16] = { 2564 /* current state: new state: action: */ 2565 [0 /* (Invalid) */] = TCP_CLOSE, 2566 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2567 [TCP_SYN_SENT] = TCP_CLOSE, 2568 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2569 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2570 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2571 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ 2572 [TCP_CLOSE] = TCP_CLOSE, 2573 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2574 [TCP_LAST_ACK] = TCP_LAST_ACK, 2575 [TCP_LISTEN] = TCP_CLOSE, 2576 [TCP_CLOSING] = TCP_CLOSING, 2577 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! 
*/ 2578 }; 2579 2580 static int mptcp_close_state(struct sock *sk) 2581 { 2582 int next = (int)new_state[sk->sk_state]; 2583 int ns = next & TCP_STATE_MASK; 2584 2585 inet_sk_state_store(sk, ns); 2586 2587 return next & TCP_ACTION_FIN; 2588 } 2589 2590 static void __mptcp_check_send_data_fin(struct sock *sk) 2591 { 2592 struct mptcp_subflow_context *subflow; 2593 struct mptcp_sock *msk = mptcp_sk(sk); 2594 2595 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu", 2596 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), 2597 msk->snd_nxt, msk->write_seq); 2598 2599 /* if we still need to enqueue subflows, or we are not really shutting 2600 * down, skip this 2601 */ 2602 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || 2603 mptcp_send_head(sk)) 2604 return; 2605 2606 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 2607 2608 /* a fallback socket will not get a data_fin/ack, so it can move to the 2609 * next state now 2610 */ 2611 if (__mptcp_check_fallback(msk)) { 2612 if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { 2613 inet_sk_state_store(sk, TCP_CLOSE); 2614 mptcp_close_wake_up(sk); 2615 } else if (sk->sk_state == TCP_FIN_WAIT1) { 2616 inet_sk_state_store(sk, TCP_FIN_WAIT2); 2617 } 2618 } 2619 2620 mptcp_flush_join_list(msk); 2621 mptcp_for_each_subflow(msk, subflow) { 2622 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 2623 2624 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); 2625 } 2626 } 2627 2628 static void __mptcp_wr_shutdown(struct sock *sk) 2629 { 2630 struct mptcp_sock *msk = mptcp_sk(sk); 2631 2632 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d", 2633 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, 2634 !!mptcp_send_head(sk)); 2635 2636 /* will be ignored by fallback sockets */ 2637 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); 2638 WRITE_ONCE(msk->snd_data_fin_enable, 1); 2639 2640 __mptcp_check_send_data_fin(sk); 2641 } 2642 2643 static void __mptcp_destroy_sock(struct sock *sk) 2644 { 2645 struct mptcp_subflow_context *subflow, *tmp; 2646 struct mptcp_sock *msk = mptcp_sk(sk); 2647 LIST_HEAD(conn_list); 2648 2649 pr_debug("msk=%p", msk); 2650 2651 might_sleep(); 2652 2653 /* be sure to always acquire the join list lock, to sync vs 2654 * mptcp_finish_join().
2655 */ 2656 spin_lock_bh(&msk->join_list_lock); 2657 list_splice_tail_init(&msk->join_list, &msk->conn_list); 2658 spin_unlock_bh(&msk->join_list_lock); 2659 list_splice_init(&msk->conn_list, &conn_list); 2660 2661 sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer); 2662 sk_stop_timer(sk, &sk->sk_timer); 2663 msk->pm.status = 0; 2664 2665 list_for_each_entry_safe(subflow, tmp, &conn_list, node) { 2666 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2667 __mptcp_close_ssk(sk, ssk, subflow); 2668 } 2669 2670 sk->sk_prot->destroy(sk); 2671 2672 WARN_ON_ONCE(msk->rmem_fwd_alloc); 2673 WARN_ON_ONCE(msk->rmem_released); 2674 sk_stream_kill_queues(sk); 2675 xfrm_sk_free_policy(sk); 2676 2677 sk_refcnt_debug_release(sk); 2678 mptcp_dispose_initial_subflow(msk); 2679 sock_put(sk); 2680 } 2681 2682 static void mptcp_close(struct sock *sk, long timeout) 2683 { 2684 struct mptcp_subflow_context *subflow; 2685 bool do_cancel_work = false; 2686 2687 lock_sock(sk); 2688 sk->sk_shutdown = SHUTDOWN_MASK; 2689 2690 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { 2691 inet_sk_state_store(sk, TCP_CLOSE); 2692 goto cleanup; 2693 } 2694 2695 if (mptcp_close_state(sk)) 2696 __mptcp_wr_shutdown(sk); 2697 2698 sk_stream_wait_close(sk, timeout); 2699 2700 cleanup: 2701 /* orphan all the subflows */ 2702 inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32; 2703 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { 2704 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2705 bool slow = lock_sock_fast_nested(ssk); 2706 2707 sock_orphan(ssk); 2708 unlock_sock_fast(ssk, slow); 2709 } 2710 sock_orphan(sk); 2711 2712 sock_hold(sk); 2713 pr_debug("msk=%p state=%d", sk, sk->sk_state); 2714 if (sk->sk_state == TCP_CLOSE) { 2715 __mptcp_destroy_sock(sk); 2716 do_cancel_work = true; 2717 } else { 2718 sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN); 2719 } 2720 release_sock(sk); 2721 if (do_cancel_work) 2722 mptcp_cancel_work(sk); 2723 2724 if (mptcp_sk(sk)->token) 2725 mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL); 2726 2727 sock_put(sk); 2728 } 2729 2730 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) 2731 { 2732 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2733 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); 2734 struct ipv6_pinfo *msk6 = inet6_sk(msk); 2735 2736 msk->sk_v6_daddr = ssk->sk_v6_daddr; 2737 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; 2738 2739 if (msk6 && ssk6) { 2740 msk6->saddr = ssk6->saddr; 2741 msk6->flow_label = ssk6->flow_label; 2742 } 2743 #endif 2744 2745 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; 2746 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; 2747 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; 2748 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 2749 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 2750 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 2751 } 2752 2753 static int mptcp_disconnect(struct sock *sk, int flags) 2754 { 2755 struct mptcp_subflow_context *subflow; 2756 struct mptcp_sock *msk = mptcp_sk(sk); 2757 2758 mptcp_do_flush_join_list(msk); 2759 2760 mptcp_for_each_subflow(msk, subflow) { 2761 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2762 2763 lock_sock(ssk); 2764 tcp_disconnect(ssk, flags); 2765 release_sock(ssk); 2766 } 2767 return 0; 2768 } 2769 2770 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2771 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) 2772 { 2773 unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); 2774 2775 return (struct 
ipv6_pinfo *)(((u8 *)sk) + offset); 2776 } 2777 #endif 2778 2779 struct sock *mptcp_sk_clone(const struct sock *sk, 2780 const struct mptcp_options_received *mp_opt, 2781 struct request_sock *req) 2782 { 2783 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 2784 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 2785 struct mptcp_sock *msk; 2786 u64 ack_seq; 2787 2788 if (!nsk) 2789 return NULL; 2790 2791 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2792 if (nsk->sk_family == AF_INET6) 2793 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); 2794 #endif 2795 2796 __mptcp_init_sock(nsk); 2797 2798 msk = mptcp_sk(nsk); 2799 msk->local_key = subflow_req->local_key; 2800 msk->token = subflow_req->token; 2801 msk->subflow = NULL; 2802 WRITE_ONCE(msk->fully_established, false); 2803 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) 2804 WRITE_ONCE(msk->csum_enabled, true); 2805 2806 msk->write_seq = subflow_req->idsn + 1; 2807 msk->snd_nxt = msk->write_seq; 2808 msk->snd_una = msk->write_seq; 2809 msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd; 2810 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; 2811 2812 if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) { 2813 msk->can_ack = true; 2814 msk->remote_key = mp_opt->sndr_key; 2815 mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); 2816 ack_seq++; 2817 WRITE_ONCE(msk->ack_seq, ack_seq); 2818 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 2819 } 2820 2821 sock_reset_flag(nsk, SOCK_RCU_FREE); 2822 /* will be fully established after successful MPC subflow creation */ 2823 inet_sk_state_store(nsk, TCP_SYN_RECV); 2824 2825 security_inet_csk_clone(nsk, req); 2826 bh_unlock_sock(nsk); 2827 2828 /* keep a single reference */ 2829 __sock_put(nsk); 2830 return nsk; 2831 } 2832 2833 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) 2834 { 2835 const struct tcp_sock *tp = tcp_sk(ssk); 2836 2837 msk->rcvq_space.copied = 0; 2838 msk->rcvq_space.rtt_us = 0; 2839 2840 msk->rcvq_space.time = tp->tcp_mstamp; 2841 2842 /* initial rcv_space offering made to peer */ 2843 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, 2844 TCP_INIT_CWND * tp->advmss); 2845 if (msk->rcvq_space.space == 0) 2846 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; 2847 2848 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); 2849 } 2850 2851 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, 2852 bool kern) 2853 { 2854 struct mptcp_sock *msk = mptcp_sk(sk); 2855 struct socket *listener; 2856 struct sock *newsk; 2857 2858 listener = __mptcp_nmpc_socket(msk); 2859 if (WARN_ON_ONCE(!listener)) { 2860 *err = -EINVAL; 2861 return NULL; 2862 } 2863 2864 pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); 2865 newsk = inet_csk_accept(listener->sk, flags, err, kern); 2866 if (!newsk) 2867 return NULL; 2868 2869 pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); 2870 if (sk_is_mptcp(newsk)) { 2871 struct mptcp_subflow_context *subflow; 2872 struct sock *new_mptcp_sock; 2873 2874 subflow = mptcp_subflow_ctx(newsk); 2875 new_mptcp_sock = subflow->conn; 2876 2877 /* is_mptcp should be false if subflow->conn is missing, see 2878 * subflow_syn_recv_sock() 2879 */ 2880 if (WARN_ON_ONCE(!new_mptcp_sock)) { 2881 tcp_sk(newsk)->is_mptcp = 0; 2882 return newsk; 2883 } 2884 2885 /* acquire the 2nd reference for the owning socket */ 2886 sock_hold(new_mptcp_sock); 2887 newsk = new_mptcp_sock; 2888 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); 2889 } else { 2890 MPTCP_INC_STATS(sock_net(sk), 2891 
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); 2892 } 2893 2894 return newsk; 2895 } 2896 2897 void mptcp_destroy_common(struct mptcp_sock *msk) 2898 { 2899 struct sock *sk = (struct sock *)msk; 2900 2901 __mptcp_clear_xmit(sk); 2902 2903 /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ 2904 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); 2905 __skb_queue_purge(&sk->sk_receive_queue); 2906 skb_rbtree_purge(&msk->out_of_order_queue); 2907 2908 /* move all the rx fwd alloc into sk_forward_alloc, so that the final 2909 * reclaim in inet_sock_destruct() will dispose of it 2910 */ 2911 sk->sk_forward_alloc += msk->rmem_fwd_alloc; 2912 msk->rmem_fwd_alloc = 0; 2913 mptcp_token_destroy(msk); 2914 mptcp_pm_free_anno_list(msk); 2915 } 2916 2917 static void mptcp_destroy(struct sock *sk) 2918 { 2919 struct mptcp_sock *msk = mptcp_sk(sk); 2920 2921 mptcp_destroy_common(msk); 2922 sk_sockets_allocated_dec(sk); 2923 } 2924 2925 void __mptcp_data_acked(struct sock *sk) 2926 { 2927 if (!sock_owned_by_user(sk)) 2928 __mptcp_clean_una(sk); 2929 else 2930 set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags); 2931 2932 if (mptcp_pending_data_fin_ack(sk)) 2933 mptcp_schedule_work(sk); 2934 } 2935 2936 void __mptcp_check_push(struct sock *sk, struct sock *ssk) 2937 { 2938 if (!mptcp_send_head(sk)) 2939 return; 2940 2941 if (!sock_owned_by_user(sk)) { 2942 struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk)); 2943 2944 if (xmit_ssk == ssk) 2945 __mptcp_subflow_push_pending(sk, ssk); 2946 else if (xmit_ssk) 2947 mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND); 2948 } else { 2949 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); 2950 } 2951 } 2952 2953 /* processes deferred events and flushes wmem */ 2954 static void mptcp_release_cb(struct sock *sk) 2955 { 2956 for (;;) { 2957 unsigned long flags = 0; 2958 2959 if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) 2960 flags |= BIT(MPTCP_PUSH_PENDING); 2961 if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags)) 2962 flags |= BIT(MPTCP_RETRANSMIT); 2963 if (!flags) 2964 break; 2965 2966 /* the following actions acquire the subflow socket lock 2967 * 2968 * 1) can't be invoked in atomic scope 2969 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX 2970 * datapath acquires the msk socket spinlock while holding 2971 * the subflow socket lock 2972 */ 2973 2974 spin_unlock_bh(&sk->sk_lock.slock); 2975 if (flags & BIT(MPTCP_PUSH_PENDING)) 2976 __mptcp_push_pending(sk, 0); 2977 if (flags & BIT(MPTCP_RETRANSMIT)) 2978 __mptcp_retrans(sk); 2979 2980 cond_resched(); 2981 spin_lock_bh(&sk->sk_lock.slock); 2982 } 2983 2984 /* be sure to set the current sk state before taking actions 2985 * depending on sk_state 2986 */ 2987 if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags)) 2988 __mptcp_set_connected(sk); 2989 if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags)) 2990 __mptcp_clean_una_wakeup(sk); 2991 if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags)) 2992 __mptcp_error_report(sk); 2993 2994 __mptcp_update_rmem(sk); 2995 } 2996 2997 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 2998 * TCP can't schedule delack timer before the subflow is fully established.
2999 * MPTCP uses the delack timer to do 3rd ack retransmissions 3000 */ 3001 static void schedule_3rdack_retransmission(struct sock *ssk) 3002 { 3003 struct inet_connection_sock *icsk = inet_csk(ssk); 3004 struct tcp_sock *tp = tcp_sk(ssk); 3005 unsigned long timeout; 3006 3007 if (mptcp_subflow_ctx(ssk)->fully_established) 3008 return; 3009 3010 /* reschedule with a timeout above RTT, as we must look only for drop */ 3011 if (tp->srtt_us) 3012 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); 3013 else 3014 timeout = TCP_TIMEOUT_INIT; 3015 timeout += jiffies; 3016 3017 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 3018 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 3019 icsk->icsk_ack.timeout = timeout; 3020 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); 3021 } 3022 3023 void mptcp_subflow_process_delegated(struct sock *ssk) 3024 { 3025 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3026 struct sock *sk = subflow->conn; 3027 3028 if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { 3029 mptcp_data_lock(sk); 3030 if (!sock_owned_by_user(sk)) 3031 __mptcp_subflow_push_pending(sk, ssk); 3032 else 3033 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); 3034 mptcp_data_unlock(sk); 3035 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND); 3036 } 3037 if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) { 3038 schedule_3rdack_retransmission(ssk); 3039 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK); 3040 } 3041 } 3042 3043 static int mptcp_hash(struct sock *sk) 3044 { 3045 /* should never be called, 3046 * we hash the TCP subflows not the master socket 3047 */ 3048 WARN_ON_ONCE(1); 3049 return 0; 3050 } 3051 3052 static void mptcp_unhash(struct sock *sk) 3053 { 3054 /* called from sk_common_release(), but nothing to do here */ 3055 } 3056 3057 static int mptcp_get_port(struct sock *sk, unsigned short snum) 3058 { 3059 struct mptcp_sock *msk = mptcp_sk(sk); 3060 struct socket *ssock; 3061 3062 ssock = __mptcp_nmpc_socket(msk); 3063 pr_debug("msk=%p, subflow=%p", msk, ssock); 3064 if (WARN_ON_ONCE(!ssock)) 3065 return -EINVAL; 3066 3067 return inet_csk_get_port(ssock->sk, snum); 3068 } 3069 3070 void mptcp_finish_connect(struct sock *ssk) 3071 { 3072 struct mptcp_subflow_context *subflow; 3073 struct mptcp_sock *msk; 3074 struct sock *sk; 3075 u64 ack_seq; 3076 3077 subflow = mptcp_subflow_ctx(ssk); 3078 sk = subflow->conn; 3079 msk = mptcp_sk(sk); 3080 3081 pr_debug("msk=%p, token=%u", sk, subflow->token); 3082 3083 mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); 3084 ack_seq++; 3085 subflow->map_seq = ack_seq; 3086 subflow->map_subflow_seq = 1; 3087 3088 /* the socket is not connected yet, no msk/subflow ops can access/race 3089 * accessing the field below 3090 */ 3091 WRITE_ONCE(msk->remote_key, subflow->remote_key); 3092 WRITE_ONCE(msk->local_key, subflow->local_key); 3093 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); 3094 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 3095 WRITE_ONCE(msk->ack_seq, ack_seq); 3096 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 3097 WRITE_ONCE(msk->can_ack, 1); 3098 WRITE_ONCE(msk->snd_una, msk->write_seq); 3099 3100 mptcp_pm_new_connection(msk, ssk, 0); 3101 3102 mptcp_rcv_space_init(msk, ssk); 3103 } 3104 3105 void mptcp_sock_graft(struct sock *sk, struct socket *parent) 3106 { 3107 write_lock_bh(&sk->sk_callback_lock); 3108 rcu_assign_pointer(sk->sk_wq, &parent->wq); 3109 sk_set_socket(sk, parent); 3110 sk->sk_uid = SOCK_INODE(parent)->i_uid; 3111 
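/* after grafting, wakeups raised on the subflow (data ready, write space) are delivered through the parent MPTCP socket's wait queue */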
write_unlock_bh(&sk->sk_callback_lock); 3112 } 3113 3114 bool mptcp_finish_join(struct sock *ssk) 3115 { 3116 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3117 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 3118 struct sock *parent = (void *)msk; 3119 struct socket *parent_sock; 3120 bool ret; 3121 3122 pr_debug("msk=%p, subflow=%p", msk, subflow); 3123 3124 /* mptcp socket already closing? */ 3125 if (!mptcp_is_fully_established(parent)) { 3126 subflow->reset_reason = MPTCP_RST_EMPTCP; 3127 return false; 3128 } 3129 3130 if (!msk->pm.server_side) 3131 goto out; 3132 3133 if (!mptcp_pm_allow_new_subflow(msk)) { 3134 subflow->reset_reason = MPTCP_RST_EPROHIBIT; 3135 return false; 3136 } 3137 3138 /* active connections are already on conn_list, and we can't acquire 3139 * msk lock here. 3140 * use the join list lock as synchronization point and double-check 3141 * msk status to avoid racing with __mptcp_destroy_sock() 3142 */ 3143 spin_lock_bh(&msk->join_list_lock); 3144 ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; 3145 if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) { 3146 list_add_tail(&subflow->node, &msk->join_list); 3147 sock_hold(ssk); 3148 } 3149 spin_unlock_bh(&msk->join_list_lock); 3150 if (!ret) { 3151 subflow->reset_reason = MPTCP_RST_EPROHIBIT; 3152 return false; 3153 } 3154 3155 /* attach to msk socket only after we are sure he will deal with us 3156 * at close time 3157 */ 3158 parent_sock = READ_ONCE(parent->sk_socket); 3159 if (parent_sock && !ssk->sk_socket) 3160 mptcp_sock_graft(ssk, parent_sock); 3161 subflow->map_seq = READ_ONCE(msk->ack_seq); 3162 out: 3163 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); 3164 return true; 3165 } 3166 3167 static void mptcp_shutdown(struct sock *sk, int how) 3168 { 3169 pr_debug("sk=%p, how=%d", sk, how); 3170 3171 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) 3172 __mptcp_wr_shutdown(sk); 3173 } 3174 3175 static int mptcp_forward_alloc_get(const struct sock *sk) 3176 { 3177 return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; 3178 } 3179 3180 static struct proto mptcp_prot = { 3181 .name = "MPTCP", 3182 .owner = THIS_MODULE, 3183 .init = mptcp_init_sock, 3184 .disconnect = mptcp_disconnect, 3185 .close = mptcp_close, 3186 .accept = mptcp_accept, 3187 .setsockopt = mptcp_setsockopt, 3188 .getsockopt = mptcp_getsockopt, 3189 .shutdown = mptcp_shutdown, 3190 .destroy = mptcp_destroy, 3191 .sendmsg = mptcp_sendmsg, 3192 .recvmsg = mptcp_recvmsg, 3193 .release_cb = mptcp_release_cb, 3194 .hash = mptcp_hash, 3195 .unhash = mptcp_unhash, 3196 .get_port = mptcp_get_port, 3197 .forward_alloc_get = mptcp_forward_alloc_get, 3198 .sockets_allocated = &mptcp_sockets_allocated, 3199 .memory_allocated = &tcp_memory_allocated, 3200 .memory_pressure = &tcp_memory_pressure, 3201 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), 3202 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), 3203 .sysctl_mem = sysctl_tcp_mem, 3204 .obj_size = sizeof(struct mptcp_sock), 3205 .slab_flags = SLAB_TYPESAFE_BY_RCU, 3206 .no_autobind = true, 3207 }; 3208 3209 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3210 { 3211 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3212 struct socket *ssock; 3213 int err; 3214 3215 lock_sock(sock->sk); 3216 ssock = __mptcp_nmpc_socket(msk); 3217 if (!ssock) { 3218 err = -EINVAL; 3219 goto unlock; 3220 } 3221 3222 err = ssock->ops->bind(ssock, uaddr, addr_len); 3223 if (!err) 3224 mptcp_copy_inaddrs(sock->sk, 
ssock->sk); 3225 3226 unlock: 3227 release_sock(sock->sk); 3228 return err; 3229 } 3230 3231 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, 3232 struct mptcp_subflow_context *subflow) 3233 { 3234 subflow->request_mptcp = 0; 3235 __mptcp_do_fallback(msk); 3236 } 3237 3238 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, 3239 int addr_len, int flags) 3240 { 3241 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3242 struct mptcp_subflow_context *subflow; 3243 struct socket *ssock; 3244 int err; 3245 3246 lock_sock(sock->sk); 3247 if (sock->state != SS_UNCONNECTED && msk->subflow) { 3248 /* pending connection or invalid state, let existing subflow 3249 * cope with that 3250 */ 3251 ssock = msk->subflow; 3252 goto do_connect; 3253 } 3254 3255 ssock = __mptcp_nmpc_socket(msk); 3256 if (!ssock) { 3257 err = -EINVAL; 3258 goto unlock; 3259 } 3260 3261 mptcp_token_destroy(msk); 3262 inet_sk_state_store(sock->sk, TCP_SYN_SENT); 3263 subflow = mptcp_subflow_ctx(ssock->sk); 3264 #ifdef CONFIG_TCP_MD5SIG 3265 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of 3266 * TCP option space. 3267 */ 3268 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) 3269 mptcp_subflow_early_fallback(msk, subflow); 3270 #endif 3271 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) { 3272 MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT); 3273 mptcp_subflow_early_fallback(msk, subflow); 3274 } 3275 if (likely(!__mptcp_check_fallback(msk))) 3276 MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE); 3277 3278 do_connect: 3279 err = ssock->ops->connect(ssock, uaddr, addr_len, flags); 3280 sock->state = ssock->state; 3281 3282 /* on successful connect, the msk state will be moved to established by 3283 * subflow_finish_connect() 3284 */ 3285 if (!err || err == -EINPROGRESS) 3286 mptcp_copy_inaddrs(sock->sk, ssock->sk); 3287 else 3288 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 3289 3290 unlock: 3291 release_sock(sock->sk); 3292 return err; 3293 } 3294 3295 static int mptcp_listen(struct socket *sock, int backlog) 3296 { 3297 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3298 struct socket *ssock; 3299 int err; 3300 3301 pr_debug("msk=%p", msk); 3302 3303 lock_sock(sock->sk); 3304 ssock = __mptcp_nmpc_socket(msk); 3305 if (!ssock) { 3306 err = -EINVAL; 3307 goto unlock; 3308 } 3309 3310 mptcp_token_destroy(msk); 3311 inet_sk_state_store(sock->sk, TCP_LISTEN); 3312 sock_set_flag(sock->sk, SOCK_RCU_FREE); 3313 3314 err = ssock->ops->listen(ssock, backlog); 3315 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 3316 if (!err) 3317 mptcp_copy_inaddrs(sock->sk, ssock->sk); 3318 3319 unlock: 3320 release_sock(sock->sk); 3321 return err; 3322 } 3323 3324 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, 3325 int flags, bool kern) 3326 { 3327 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3328 struct socket *ssock; 3329 int err; 3330 3331 pr_debug("msk=%p", msk); 3332 3333 lock_sock(sock->sk); 3334 if (sock->sk->sk_state != TCP_LISTEN) 3335 goto unlock_fail; 3336 3337 ssock = __mptcp_nmpc_socket(msk); 3338 if (!ssock) 3339 goto unlock_fail; 3340 3341 clear_bit(MPTCP_DATA_READY, &msk->flags); 3342 sock_hold(ssock->sk); 3343 release_sock(sock->sk); 3344 3345 err = ssock->ops->accept(sock, newsock, flags, kern); 3346 if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { 3347 struct mptcp_sock *msk = mptcp_sk(newsock->sk); 3348 struct mptcp_subflow_context *subflow; 3349 struct sock *newsk 
= newsock->sk; 3350 3351 lock_sock(newsk); 3352 3353 /* PM/worker can now acquire the first subflow socket 3354 * lock without racing with listener queue cleanup, 3355 * we can notify it, if needed. 3356 * 3357 * Even if remote has reset the initial subflow by now 3358 * the refcnt is still at least one. 3359 */ 3360 subflow = mptcp_subflow_ctx(msk->first); 3361 list_add(&subflow->node, &msk->conn_list); 3362 sock_hold(msk->first); 3363 if (mptcp_is_fully_established(newsk)) 3364 mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL); 3365 3366 mptcp_copy_inaddrs(newsk, msk->first); 3367 mptcp_rcv_space_init(msk, msk->first); 3368 mptcp_propagate_sndbuf(newsk, msk->first); 3369 3370 /* set ssk->sk_socket of accept()ed flows to mptcp socket. 3371 * This is needed so NOSPACE flag can be set from tcp stack. 3372 */ 3373 mptcp_flush_join_list(msk); 3374 mptcp_for_each_subflow(msk, subflow) { 3375 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3376 3377 if (!ssk->sk_socket) 3378 mptcp_sock_graft(ssk, newsock); 3379 } 3380 release_sock(newsk); 3381 } 3382 3383 if (inet_csk_listen_poll(ssock->sk)) 3384 set_bit(MPTCP_DATA_READY, &msk->flags); 3385 sock_put(ssock->sk); 3386 return err; 3387 3388 unlock_fail: 3389 release_sock(sock->sk); 3390 return -EINVAL; 3391 } 3392 3393 static __poll_t mptcp_check_readable(struct mptcp_sock *msk) 3394 { 3395 /* Concurrent splices from sk_receive_queue into receive_queue will 3396 * always show at least one non-empty queue when checked in this order. 3397 */ 3398 if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) && 3399 skb_queue_empty_lockless(&msk->receive_queue)) 3400 return 0; 3401 3402 return EPOLLIN | EPOLLRDNORM; 3403 } 3404 3405 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) 3406 { 3407 struct sock *sk = (struct sock *)msk; 3408 3409 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) 3410 return EPOLLOUT | EPOLLWRNORM; 3411 3412 if (sk_stream_is_writeable(sk)) 3413 return EPOLLOUT | EPOLLWRNORM; 3414 3415 mptcp_set_nospace(sk); 3416 smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */ 3417 if (sk_stream_is_writeable(sk)) 3418 return EPOLLOUT | EPOLLWRNORM; 3419 3420 return 0; 3421 } 3422 3423 static __poll_t mptcp_poll(struct file *file, struct socket *sock, 3424 struct poll_table_struct *wait) 3425 { 3426 struct sock *sk = sock->sk; 3427 struct mptcp_sock *msk; 3428 __poll_t mask = 0; 3429 int state; 3430 3431 msk = mptcp_sk(sk); 3432 sock_poll_wait(file, sock, wait); 3433 3434 state = inet_sk_state_load(sk); 3435 pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); 3436 if (state == TCP_LISTEN) 3437 return test_bit(MPTCP_DATA_READY, &msk->flags) ? 
EPOLLIN | EPOLLRDNORM : 0; 3438 3439 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { 3440 mask |= mptcp_check_readable(msk); 3441 mask |= mptcp_check_writeable(msk); 3442 } 3443 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 3444 mask |= EPOLLHUP; 3445 if (sk->sk_shutdown & RCV_SHUTDOWN) 3446 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 3447 3448 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 3449 smp_rmb(); 3450 if (sk->sk_err) 3451 mask |= EPOLLERR; 3452 3453 return mask; 3454 } 3455 3456 static const struct proto_ops mptcp_stream_ops = { 3457 .family = PF_INET, 3458 .owner = THIS_MODULE, 3459 .release = inet_release, 3460 .bind = mptcp_bind, 3461 .connect = mptcp_stream_connect, 3462 .socketpair = sock_no_socketpair, 3463 .accept = mptcp_stream_accept, 3464 .getname = inet_getname, 3465 .poll = mptcp_poll, 3466 .ioctl = inet_ioctl, 3467 .gettstamp = sock_gettstamp, 3468 .listen = mptcp_listen, 3469 .shutdown = inet_shutdown, 3470 .setsockopt = sock_common_setsockopt, 3471 .getsockopt = sock_common_getsockopt, 3472 .sendmsg = inet_sendmsg, 3473 .recvmsg = inet_recvmsg, 3474 .mmap = sock_no_mmap, 3475 .sendpage = inet_sendpage, 3476 }; 3477 3478 static struct inet_protosw mptcp_protosw = { 3479 .type = SOCK_STREAM, 3480 .protocol = IPPROTO_MPTCP, 3481 .prot = &mptcp_prot, 3482 .ops = &mptcp_stream_ops, 3483 .flags = INET_PROTOSW_ICSK, 3484 }; 3485 3486 static int mptcp_napi_poll(struct napi_struct *napi, int budget) 3487 { 3488 struct mptcp_delegated_action *delegated; 3489 struct mptcp_subflow_context *subflow; 3490 int work_done = 0; 3491 3492 delegated = container_of(napi, struct mptcp_delegated_action, napi); 3493 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { 3494 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3495 3496 bh_lock_sock_nested(ssk); 3497 if (!sock_owned_by_user(ssk) && 3498 mptcp_subflow_has_delegated_action(subflow)) 3499 mptcp_subflow_process_delegated(ssk); 3500 /* ... elsewhere tcp_release_cb_override already processed 3501 * the action or will do so at the next release_sock(). 3502 * In both cases we must dequeue the subflow here - on the same 3503 * CPU that scheduled it.
3504 */ 3505 bh_unlock_sock(ssk); 3506 sock_put(ssk); 3507 3508 if (++work_done == budget) 3509 return budget; 3510 } 3511 3512 /* always provide a 0 'work_done' argument, so that napi_complete_done 3513 * will not try accessing the NULL napi->dev ptr 3514 */ 3515 napi_complete_done(napi, 0); 3516 return work_done; 3517 } 3518 3519 void __init mptcp_proto_init(void) 3520 { 3521 struct mptcp_delegated_action *delegated; 3522 int cpu; 3523 3524 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; 3525 3526 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) 3527 panic("Failed to allocate MPTCP pcpu counter\n"); 3528 3529 init_dummy_netdev(&mptcp_napi_dev); 3530 for_each_possible_cpu(cpu) { 3531 delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu); 3532 INIT_LIST_HEAD(&delegated->head); 3533 netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll, 3534 NAPI_POLL_WEIGHT); 3535 napi_enable(&delegated->napi); 3536 } 3537 3538 mptcp_subflow_init(); 3539 mptcp_pm_init(); 3540 mptcp_token_init(); 3541 3542 if (proto_register(&mptcp_prot, 1) != 0) 3543 panic("Failed to register MPTCP proto.\n"); 3544 3545 inet_register_protosw(&mptcp_protosw); 3546 3547 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); 3548 } 3549 3550 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3551 static const struct proto_ops mptcp_v6_stream_ops = { 3552 .family = PF_INET6, 3553 .owner = THIS_MODULE, 3554 .release = inet6_release, 3555 .bind = mptcp_bind, 3556 .connect = mptcp_stream_connect, 3557 .socketpair = sock_no_socketpair, 3558 .accept = mptcp_stream_accept, 3559 .getname = inet6_getname, 3560 .poll = mptcp_poll, 3561 .ioctl = inet6_ioctl, 3562 .gettstamp = sock_gettstamp, 3563 .listen = mptcp_listen, 3564 .shutdown = inet_shutdown, 3565 .setsockopt = sock_common_setsockopt, 3566 .getsockopt = sock_common_getsockopt, 3567 .sendmsg = inet6_sendmsg, 3568 .recvmsg = inet6_recvmsg, 3569 .mmap = sock_no_mmap, 3570 .sendpage = inet_sendpage, 3571 #ifdef CONFIG_COMPAT 3572 .compat_ioctl = inet6_compat_ioctl, 3573 #endif 3574 }; 3575 3576 static struct proto mptcp_v6_prot; 3577 3578 static void mptcp_v6_destroy(struct sock *sk) 3579 { 3580 mptcp_destroy(sk); 3581 inet6_destroy_sock(sk); 3582 } 3583 3584 static struct inet_protosw mptcp_v6_protosw = { 3585 .type = SOCK_STREAM, 3586 .protocol = IPPROTO_MPTCP, 3587 .prot = &mptcp_v6_prot, 3588 .ops = &mptcp_v6_stream_ops, 3589 .flags = INET_PROTOSW_ICSK, 3590 }; 3591 3592 int __init mptcp_proto_v6_init(void) 3593 { 3594 int err; 3595 3596 mptcp_v6_prot = mptcp_prot; 3597 strcpy(mptcp_v6_prot.name, "MPTCPv6"); 3598 mptcp_v6_prot.slab = NULL; 3599 mptcp_v6_prot.destroy = mptcp_v6_destroy; 3600 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); 3601 3602 err = proto_register(&mptcp_v6_prot, 1); 3603 if (err) 3604 return err; 3605 3606 err = inet6_register_protosw(&mptcp_v6_protosw); 3607 if (err) 3608 proto_unregister(&mptcp_v6_prot); 3609 3610 return err; 3611 } 3612 #endif 3613
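/*
 * Usage sketch (userspace, illustrative only): once this protocol is
 * registered, an application opts in to Multipath TCP by requesting
 * IPPROTO_MPTCP on an otherwise ordinary stream socket; bind()/connect()/
 * listen()/accept() then go through the mptcp_prot and mptcp_stream_ops
 * callbacks defined above:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 */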