// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       ((mptcp_pm_is_userspace(msk) &&
		 mptcp_userspace_pm_active(msk)) ||
		READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;
	subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}
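
/* Background sketch (added for clarity): per RFC 8684, the MP_JOIN HMAC
 * built by subflow_generate_hmac() above is HMAC-SHA256, keyed with the
 * concatenation of the two 64-bit handshake keys and computed over the
 * concatenation of the two 32-bit nonces, roughly:
 *
 *	key   = key1 || key2		(16 bytes)
 *	msg   = nonce1 || nonce2	(8 bytes)
 *	thmac = leftmost 64 bits of HMAC-SHA256(key, msg)
 *
 * The SYN-ACK carries this 64-bit truncation; the third ACK carries a
 * longer, 160-bit truncation (see subflow_hmac_valid() below).
 */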

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);

		if (mp_opt.backup)
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}
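
/* Per RFC 8684 section 3.1, token and IDSN are both derived from the 64-bit
 * key via SHA-256 (see mptcp_crypto_key_sha() above), roughly:
 *
 *	token = most significant 32 bits of SHA-256(key)
 *	idsn  = least significant 64 bits of SHA-256(key)
 *
 * The retry loop in subflow_check_req() exists because tokens must be
 * unique per namespace: a colliding token forces picking a new local key.
 */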

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_request_sock *ireq = inet_rsk(req);

	/* clear tstamp_ok, as needed depending on cookie */
	if (foc && foc->len > -1)
		ireq->tstamp_ok = 0;

	if (synack_type == TCP_SYNACK_FASTOPEN)
		mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token, thmac, subflow->thmac);

	return thmac == subflow->thmac;
}

void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* mptcp_mp_fail_no_response() can reach here on an already closed
	 * socket
	 */
	if (ssk->sk_state == TCP_CLOSE)
		return;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_sync_state(struct sock *sk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk = msk->first;

	subflow = mptcp_subflow_ctx(ssk);
	__mptcp_propagate_sndbuf(sk, ssk);
	if (!msk->rcvspace_init)
		mptcp_rcv_space_init(msk, ssk);

	if (sk->sk_state == TCP_SYN_SENT) {
		/* subflow->idsn is always available in TCP_SYN_SENT state,
		 * even for the FASTOPEN scenarios
		 */
		WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
		WRITE_ONCE(msk->snd_nxt, msk->write_seq);
		mptcp_set_state(sk, state);
		sk->sk_state_change(sk);
	}
}

static void subflow_set_remote_key(struct mptcp_sock *msk,
				   struct mptcp_subflow_context *subflow,
				   const struct mptcp_options_received *mp_opt)
{
	/* active MPC subflow will reach here multiple times:
	 * at subflow_finish_connect() time and at 4th ack time
	 */
	if (subflow->remote_key_valid)
		return;

	subflow->remote_key_valid = 1;
	subflow->remote_key = mp_opt->sndr_key;
	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
	subflow->iasn++;

	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->ack_seq, subflow->iasn);
	WRITE_ONCE(msk->can_ack, true);
	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}
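
/* Note on the initial data sequence numbers: as sketched earlier, each
 * direction derives its IDSN from the respective key, so iasn above is the
 * least significant 64 bits of SHA-256(remote_key) plus one. Like TCP's
 * ISN, the IDSN itself is consumed by the handshake, so the first data byte
 * carries IDSN + 1 and the peers never exchange the IDSN explicitly.
 */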

static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
				  struct mptcp_subflow_context *subflow,
				  const struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_data_lock(sk);
	if (mp_opt) {
		/* Options are available only in the non-fallback cases;
		 * avoid updating rx path fields otherwise
		 */
		WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
		WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
		subflow_set_remote_key(msk, subflow, mp_opt);
	}

	if (!sock_owned_by_user(sk)) {
		__mptcp_sync_state(sk, ssk->sk_state);
	} else {
		msk->pending_state = ssk->sk_state;
		__set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
	}
	mptcp_data_unlock(sk);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	msk = mptcp_sk(parent);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(msk);
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(msk->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_propagate_state(parent, sk, subflow, &mp_opt);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow->backup)
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);

		if (subflow_use_different_dport(msk, sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}
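
/* For reference, the MP_JOIN exchange handled above and in
 * subflow_check_req()/subflow_syn_recv_sock() is, per RFC 8684:
 *
 *	SYN:	 token(B)      + nonce(A)
 *	SYN/ACK: thmac (64bit) + nonce(B)
 *	ACK:	 hmac (160bit)
 *
 * The initiator validates the 64-bit truncated HMAC from the SYN-ACK and
 * answers with the 160-bit one; either side resets the subflow on mismatch.
 */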

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
	WARN_ON_ONCE(local_id < 0 || local_id > 255);
	WRITE_ONCE(subflow->local_id, local_id);
}

static int subflow_chk_local_id(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	int err;

	if (likely(subflow->local_id >= 0))
		return 0;

	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
	if (err < 0)
		return err;

	subflow_set_local_id(subflow, err);
	subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);

	return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	list_del(&mptcp_subflow_ctx(ssk)->node);
	if (inet_csk(ssk)->icsk_ulp_ops) {
		subflow_ulp_fallback(ssk, ctx);
		if (ctx->conn)
			sock_put(ctx->conn);
	}

	kfree_rcu(ctx, rcu);
}

void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
				       struct mptcp_subflow_context *subflow,
				       const struct mptcp_options_received *mp_opt)
{
	subflow_set_remote_key(msk, subflow, mp_opt);
	subflow->fully_established = 1;
	WRITE_ONCE(msk->fully_established, true);

	if (subflow->is_mptfo)
		__mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}
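
/* subflow_hmac_valid() compares only MPTCPOPT_HMAC_LEN (20) bytes: the ACK
 * carries the 160-bit truncation of the HMAC-SHA256 output. crypto_memneq()
 * is used instead of memcmp() so the comparison runs in constant time and
 * does not leak, via timing, how many leading bytes matched.
 */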

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct mptcp_sock *owner;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions &
		      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
			fallback = true;

	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}
			goto fallback;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
			if (!ctx->conn)
				goto fallback;

			ctx->subflow_id = 1;
			owner = mptcp_sk(ctx->conn);
			mptcp_pm_new_connection(owner, child, 1);

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
				mptcp_pm_fully_established(owner, child);
				ctx->pm_notified = 1;
			}
		} else if (ctx->mp_join) {
			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	mptcp_subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;

fallback:
	if (fallback)
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	mptcp_subflow_drop_ctx(child);
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY,
	MAPPING_BAD_CSUM
};
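
/* A quick reminder of how a DSS mapping relates TCP and MPTCP sequence
 * spaces, as used by the helpers below: a mapping announces that the
 * subflow bytes
 *
 *	[map_subflow_seq, map_subflow_seq + map_data_len)
 *
 * (relative subflow sequence numbers, i.e. offset by ssn_offset) carry the
 * data-level sequence numbers
 *
 *	[map_seq, map_seq + map_data_len)
 *
 * so for an skb with relative SSN 'ssn' covered by the current mapping, the
 * DSN of its first byte is map_seq + (ssn - map_subflow_seq).
 */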

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		return MAPPING_BAD_CSUM;
	}

	subflow->valid_csum_seen = 1;
	return MAPPING_OK;
}
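
/* The DSS checksum validated above is, per RFC 8684 section 3.3.1, the
 * standard Internet checksum of the mapped data plus a pseudo-header built
 * from the mapping itself, conceptually:
 *
 *	checksum(data-seq (8B) | subflow-seq (4B) | data-level len (2B) |
 *		 csum (2B) | payload)
 *
 * so a middlebox that coalesces, splits or rewrites the TCP payload without
 * updating the DSS option is detected and triggers MAPPING_BAD_CSUM.
 */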

static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue; those are the only 0 len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_debug("infinite mapping received");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		subflow->map_data_len = 0;
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated)
					mptcp_schedule_work((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cf. RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	struct tcp_sock *tp = tcp_sk(ssk);
	u32 offset, incr, avail_len;

	offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(offset > skb->len))
		goto out;

	avail_len = skb->len - offset;
	incr = limit >= avail_len ? avail_len + fin : limit;

	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
		 offset, subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;

out:
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}
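
/* Most DSS options carry only the lower 32 bits of the data sequence
 * number. mptcp_expand_seq(), used above, rebuilds the full 64-bit DSN by
 * picking the 64-bit value closest to the current msk-level ack_seq; e.g.
 * with ack_seq = 0x00000001fffff000, an incoming 32-bit seq 0x00000100
 * expands to 0x0000000200000100 (wrap-around) rather than
 * 0x0000000100000100.
 */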

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	if (subflow->mp_join)
		return false;
	else if (READ_ONCE(msk->csum_enabled))
		return !subflow->valid_csum_seen;
	else
		return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned long fail_tout;

	/* graceful failure can happen only on the MPC subflow */
	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
		return;

	/* since the close timeout takes precedence over the fail one,
	 * no need to start the latter when the first is already set
	 */
	if (sock_flag((struct sock *)msk, SOCK_DEAD))
		return;

	/* we don't need extreme accuracy here, use a zero fail_tout as special
	 * value meaning no fail timeout at all;
	 */
	fail_tout = jiffies + TCP_RTO_MAX;
	if (!fail_tout)
		fail_tout = 1;
	WRITE_ONCE(subflow->fail_tout, fail_tout);
	tcp_send_ack(ssk);

	mptcp_reset_tout_timer(msk, subflow->fail_tout);
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
			     status == MAPPING_BAD_CSUM))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		if (unlikely(!READ_ONCE(msk->can_ack)))
			goto fallback;

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	if (!__mptcp_check_fallback(msk)) {
		/* RFC 8684 section 3.7. */
		if (status == MAPPING_BAD_CSUM &&
		    (subflow->mp_join || subflow->valid_csum_seen)) {
			subflow->send_mp_fail = 1;

			if (!READ_ONCE(msk->allow_infinite_fallback)) {
				subflow->reset_transient = 0;
				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
				goto reset;
			}
			mptcp_subflow_fail(msk, ssk);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
			return true;
		}

		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
			/* fatal protocol error, close the socket.
			 * subflow_error_report() will introduce the appropriate barriers
			 */
			subflow->reset_transient = 0;
			subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
			WRITE_ONCE(ssk->sk_err, EBADMSG);
			tcp_set_state(ssk, TCP_CLOSE);
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
			tcp_send_active_reset(ssk, GFP_ATOMIC);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
			return false;
		}

		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	/* bail early if this is a no-op, so that we avoid introducing a
	 * problematic lockdep dependency between TCP accept queue lock
	 * and msk socket spinlock
	 */
	if (!sk->sk_socket)
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	trace_sk_data_ready(sk);

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before reaching here,
		 * avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk)) {
		mptcp_data_ready(parent, sk);

		/* subflow-level lowat tests are not relevant.
		 * respect the msk-level threshold eventually mandating an immediate ack
		 */
		if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
		    (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	} else if (unlikely(sk->sk_err)) {
		subflow_error_report(sk);
	}
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	int err = -ENOTCONN;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;

	if (!mptcp_is_fully_established(sk))
		goto err_out;

	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
	if (err)
		goto err_out;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (local_id)
		subflow_set_local_id(subflow, local_id);

	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
					     &flags, &ifindex);
	subflow->remote_key_valid = 1;
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	mptcp_sockopt_sync(msk, ssk);

	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	WRITE_ONCE(subflow->remote_id, remote_id);
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	subflow->subflow_id = msk->subflow_id++;
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	sock_hold(ssk);
	list_add_tail(&subflow->node, &msk->conn_list);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	WRITE_ONCE(msk->allow_infinite_fallback, false);
	mptcp_stop_tout_timer(sk);
	return 0;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);

err_out:
	/* we account subflows before the creation, and these failures will not
	 * be caught by sk_state_change()
	 */
	mptcp_pm_close_subflow(msk);
	return err;
}
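
/* __mptcp_subflow_connect() is typically driven by the in-kernel path
 * manager; for instance (assuming a standard iproute2 setup, shown only as
 * an illustration) an additional subflow endpoint configured with:
 *
 *	ip mptcp endpoint add 10.0.2.1 dev eth1 subflow
 *
 * makes the PM invoke this function on an established connection, kicking
 * off the MP_JOIN SYN whose SYN-ACK is handled by subflow_finish_connect()
 * above.
 */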

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	err = security_mptcp_add_subflow(sk, sf->sk);
	if (err)
		goto release_ssk;

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 * Update ns_tracker to current stack trace and refcounted tracker.
	 */
	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");

release_ssk:
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;
	WRITE_ONCE(ctx->local_id, -1);

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}

void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct request_sock *req, *head, *tail;
	struct mptcp_subflow_context *subflow;
	struct sock *sk, *ssk;

	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
	 * Splice the req list, so that accept() can not reach the pending ssk after
	 * the listener socket is released below.
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx alive;
		 * it will be freed by __mptcp_close_ssk() when the
		 * subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change();
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(); the MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		subflow_set_local_id(new_ctx, 0);
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->request_bkup = subflow_req->request_bkup;
		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req local_id is valid, fetched via
		 * subflow_check_req() and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow
	 * in the napi queue. To respect locking, only the same CPU that
	 * originated the action can touch the list. mptcp_napi_poll() will
	 * take care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}
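/* The delegated-status handling above is the consumer half of a simple
 * producer/consumer scheme: other contexts queue an action on the subflow
 * with, e.g. (a sketch; see mptcp_subflow_delegate() in protocol.h):
 *
 *	mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SEND);
 *
 * and the pending bits are consumed either here, on release_cb, or by
 * mptcp_napi_poll(). set_mask_bits() fetches and clears them atomically,
 * so each action is processed exactly once.
 */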
static int tcp_abort_override(struct sock *ssk, int err)
{
	/* closing a listener subflow requires a great deal of care;
	 * keep it simple and just prevent such an operation
	 */
	if (inet_sk_state_load(ssk) == TCP_LISTEN)
		return -EINVAL;

	return tcp_abort(ssk, err);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}
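/* SLAB_TYPESAFE_BY_RCU above lets request socks be freed and reused
 * without waiting for an RCU grace period: lockless lookups may observe
 * a recycled object, but never memory of a different type, so readers
 * must re-validate the request sock after the lookup.
 */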
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;
	tcp_prot_override.diag_destroy = tcp_abort_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if that is no
	 * longer the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
	tcpv6_prot_override.diag_destroy = tcp_abort_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}
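/* mptcp_subflow_init() runs once at boot, from mptcp_proto_init(); panic()
 * on failure is acceptable only in that context: if the compiled-in MPTCP
 * protocol cannot set up its request sock ops or register its ULP, the
 * kernel cannot offer IPPROTO_MPTCP at all.
 */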