// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       ((mptcp_pm_is_userspace(msk) &&
		 mptcp_userspace_pm_active(msk)) ||
		READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}
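
/* Stash the MPTCP reset reason on the skb; the TCP reset path uses it
 * to fill the matching MP_TCPRST option on the outgoing reset.
 */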
static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case.
		 */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_request_sock *ireq = inet_rsk(req);

	/* clear tstamp_ok, as needed depending on cookie */
	if (foc && foc->len > -1)
		ireq->tstamp_ok = 0;

	if (synack_type == TCP_SYNACK_FASTOPEN)
		mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token, thmac, subflow->thmac);

	return thmac == subflow->thmac;
}

void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* mptcp_mp_fail_no_response() can reach here on an already closed
	 * socket
	 */
	if (ssk->sk_state == TCP_CLOSE)
		return;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_sync_state(struct sock *sk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk = msk->first;

	subflow = mptcp_subflow_ctx(ssk);
	__mptcp_propagate_sndbuf(sk, ssk);
	if (!msk->rcvspace_init)
		mptcp_rcv_space_init(msk, ssk);

	if (sk->sk_state == TCP_SYN_SENT) {
		/* subflow->idsn is always available in TCP_SYN_SENT state,
		 * even for the FASTOPEN scenarios
		 */
		WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
		WRITE_ONCE(msk->snd_nxt, msk->write_seq);
		mptcp_set_state(sk, state);
		sk->sk_state_change(sk);
	}
}

static void subflow_set_remote_key(struct mptcp_sock *msk,
				   struct mptcp_subflow_context *subflow,
				   const struct mptcp_options_received *mp_opt)
{
	/* active MPC subflow will reach here multiple times:
	 * at subflow_finish_connect() time and at 4th ack time
	 */
	if (subflow->remote_key_valid)
		return;

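	/* Latch the peer key and derive the initial data ACK sequence (IASN) from it. */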
	subflow->remote_key_valid = 1;
	subflow->remote_key = mp_opt->sndr_key;
	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
	subflow->iasn++;

	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->ack_seq, subflow->iasn);
	WRITE_ONCE(msk->can_ack, true);
	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}

static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
				  struct mptcp_subflow_context *subflow,
				  const struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_data_lock(sk);
	if (mp_opt) {
		/* Options are available only in the non fallback cases
		 * avoid updating rx path fields otherwise
		 */
		WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
		WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
		subflow_set_remote_key(msk, subflow, mp_opt);
	}

	if (!sock_owned_by_user(sk)) {
		__mptcp_sync_state(sk, ssk->sk_state);
	} else {
		msk->pending_state = ssk->sk_state;
		__set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
	}
	mptcp_data_unlock(sk);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	msk = mptcp_sk(parent);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(msk);
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(msk->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_propagate_state(parent, sk, subflow, &mp_opt);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(msk, sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
	WARN_ON_ONCE(local_id < 0 || local_id > 255);
	WRITE_ONCE(subflow->local_id, local_id);
}

static int subflow_chk_local_id(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	int err;

	if (likely(subflow->local_id >= 0))
		return 0;

	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
	if (err < 0)
		return err;

	subflow_set_local_id(subflow, err);
	return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	list_del(&mptcp_subflow_ctx(ssk)->node);
	if (inet_csk(ssk)->icsk_ulp_ops) {
		subflow_ulp_fallback(ssk, ctx);
		if (ctx->conn)
			sock_put(ctx->conn);
	}

	kfree_rcu(ctx, rcu);
}

void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
				       struct mptcp_subflow_context *subflow,
				       const struct mptcp_options_received *mp_opt)
{
	subflow_set_remote_key(msk, subflow, mp_opt);
	subflow->fully_established = 1;
	WRITE_ONCE(msk->fully_established, true);

	if (subflow->is_mptfo)
		__mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct mptcp_sock *owner;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions &
		      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
			fallback = true;

	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}
			goto fallback;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
			if (!ctx->conn)
				goto fallback;

			ctx->subflow_id = 1;
			owner = mptcp_sk(ctx->conn);
			mptcp_pm_new_connection(owner, child, 1);

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
				mptcp_pm_fully_established(owner, child);
				ctx->pm_notified = 1;
			}
		} else if (ctx->mp_join) {
			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	mptcp_subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;

fallback:
	mptcp_subflow_drop_ctx(child);
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY,
	MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		return MAPPING_BAD_CSUM;
	}

	subflow->valid_csum_seen = 1;
	return MAPPING_OK;
}

static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0 len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_debug("infinite mapping received");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		subflow->map_data_len = 0;
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq); 1097 if (subflow->map_valid) { 1098 /* A DATA_FIN might arrive in a DSS 1099 * option before the previous mapping 1100 * has been fully consumed. Continue 1101 * handling the existing mapping. 1102 */ 1103 skb_ext_del(skb, SKB_EXT_MPTCP); 1104 return MAPPING_OK; 1105 } else { 1106 if (updated) 1107 mptcp_schedule_work((struct sock *)msk); 1108 1109 return MAPPING_DATA_FIN; 1110 } 1111 } else { 1112 u64 data_fin_seq = mpext->data_seq + data_len - 1; 1113 1114 /* If mpext->data_seq is a 32-bit value, data_fin_seq 1115 * must also be limited to 32 bits. 1116 */ 1117 if (!mpext->dsn64) 1118 data_fin_seq &= GENMASK_ULL(31, 0); 1119 1120 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); 1121 pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d", 1122 data_fin_seq, mpext->dsn64); 1123 } 1124 1125 /* Adjust for DATA_FIN using 1 byte of sequence space */ 1126 data_len--; 1127 } 1128 1129 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64); 1130 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); 1131 1132 if (subflow->map_valid) { 1133 /* Allow replacing only with an identical map */ 1134 if (subflow->map_seq == map_seq && 1135 subflow->map_subflow_seq == mpext->subflow_seq && 1136 subflow->map_data_len == data_len && 1137 subflow->map_csum_reqd == mpext->csum_reqd) { 1138 skb_ext_del(skb, SKB_EXT_MPTCP); 1139 goto validate_csum; 1140 } 1141 1142 /* If this skb data are fully covered by the current mapping, 1143 * the new map would need caching, which is not supported 1144 */ 1145 if (skb_is_fully_mapped(ssk, skb)) { 1146 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH); 1147 return MAPPING_INVALID; 1148 } 1149 1150 /* will validate the next map after consuming the current one */ 1151 goto validate_csum; 1152 } 1153 1154 subflow->map_seq = map_seq; 1155 subflow->map_subflow_seq = mpext->subflow_seq; 1156 subflow->map_data_len = data_len; 1157 subflow->map_valid = 1; 1158 subflow->map_data_fin = mpext->data_fin; 1159 subflow->mpc_map = mpext->mpc_map; 1160 subflow->map_csum_reqd = mpext->csum_reqd; 1161 subflow->map_csum_len = 0; 1162 subflow->map_data_csum = csum_unfold(mpext->csum); 1163 1164 /* Cfr RFC 8684 Section 3.3.0 */ 1165 if (unlikely(subflow->map_csum_reqd != csum_reqd)) 1166 return MAPPING_INVALID; 1167 1168 pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", 1169 subflow->map_seq, subflow->map_subflow_seq, 1170 subflow->map_data_len, subflow->map_csum_reqd, 1171 subflow->map_data_csum); 1172 1173 validate_seq: 1174 /* we revalidate valid mapping on new skb, because we must ensure 1175 * the current skb is completely covered by the available mapping 1176 */ 1177 if (!validate_mapping(ssk, skb)) { 1178 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH); 1179 return MAPPING_INVALID; 1180 } 1181 1182 skb_ext_del(skb, SKB_EXT_MPTCP); 1183 1184 validate_csum: 1185 return validate_data_csum(ssk, skb, csum_reqd); 1186 } 1187 1188 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, 1189 u64 limit) 1190 { 1191 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1192 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 1193 u32 incr; 1194 1195 incr = limit >= skb->len ? 
	       skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	if (subflow->mp_join)
		return false;
	else if (READ_ONCE(msk->csum_enabled))
		return !subflow->valid_csum_seen;
	else
		return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned long fail_tout;

	/* graceful failure can happen only on the MPC subflow */
	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
		return;

	/* since the close timeout takes precedence over the fail one,
	 * no need to start the latter when the first is already set
	 */
	if (sock_flag((struct sock *)msk, SOCK_DEAD))
		return;

	/* we don't need extreme accuracy here, use a zero fail_tout as special
	 * value meaning no fail timeout at all;
	 */
	fail_tout = jiffies + TCP_RTO_MAX;
	if (!fail_tout)
		fail_tout = 1;
	WRITE_ONCE(subflow->fail_tout, fail_tout);
	tcp_send_ack(ssk);

	mptcp_reset_tout_timer(msk, subflow->fail_tout);
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
			     status == MAPPING_BAD_CSUM))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		if (unlikely(!READ_ONCE(msk->can_ack)))
			goto fallback;

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	if (!__mptcp_check_fallback(msk)) {
		/* RFC 8684 section 3.7. */
		if (status == MAPPING_BAD_CSUM &&
		    (subflow->mp_join || subflow->valid_csum_seen)) {
			subflow->send_mp_fail = 1;

			if (!READ_ONCE(msk->allow_infinite_fallback)) {
				subflow->reset_transient = 0;
				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
				goto reset;
			}
			mptcp_subflow_fail(msk, ssk);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
			return true;
		}

		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
			/* fatal protocol error, close the socket.
			 * subflow_error_report() will introduce the appropriate barriers
			 */
			subflow->reset_transient = 0;
			subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
			WRITE_ONCE(ssk->sk_err, EBADMSG);
			tcp_set_state(ssk, TCP_CLOSE);
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
			tcp_send_active_reset(ssk, GFP_ATOMIC);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
			return false;
		}

		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	/* bail early if this is a no-op, so that we avoid introducing a
	 * problematic lockdep dependency between TCP accept queue lock
	 * and msk socket spinlock
	 */
	if (!sk->sk_socket)
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	trace_sk_data_ready(sk);

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before reaching here,
		 * avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ?
&subflow_v6m_specific : subflow_default_af_ops(sk); 1465 1466 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d", 1467 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); 1468 1469 if (likely(icsk->icsk_af_ops == target)) 1470 return; 1471 1472 subflow->icsk_af_ops = icsk->icsk_af_ops; 1473 icsk->icsk_af_ops = target; 1474 } 1475 #endif 1476 1477 void mptcp_info2sockaddr(const struct mptcp_addr_info *info, 1478 struct sockaddr_storage *addr, 1479 unsigned short family) 1480 { 1481 memset(addr, 0, sizeof(*addr)); 1482 addr->ss_family = family; 1483 if (addr->ss_family == AF_INET) { 1484 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr; 1485 1486 if (info->family == AF_INET) 1487 in_addr->sin_addr = info->addr; 1488 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1489 else if (ipv6_addr_v4mapped(&info->addr6)) 1490 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3]; 1491 #endif 1492 in_addr->sin_port = info->port; 1493 } 1494 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1495 else if (addr->ss_family == AF_INET6) { 1496 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr; 1497 1498 if (info->family == AF_INET) 1499 ipv6_addr_set_v4mapped(info->addr.s_addr, 1500 &in6_addr->sin6_addr); 1501 else 1502 in6_addr->sin6_addr = info->addr6; 1503 in6_addr->sin6_port = info->port; 1504 } 1505 #endif 1506 } 1507 1508 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, 1509 const struct mptcp_addr_info *remote) 1510 { 1511 struct mptcp_sock *msk = mptcp_sk(sk); 1512 struct mptcp_subflow_context *subflow; 1513 struct sockaddr_storage addr; 1514 int remote_id = remote->id; 1515 int local_id = loc->id; 1516 int err = -ENOTCONN; 1517 struct socket *sf; 1518 struct sock *ssk; 1519 u32 remote_token; 1520 int addrlen; 1521 int ifindex; 1522 u8 flags; 1523 1524 if (!mptcp_is_fully_established(sk)) 1525 goto err_out; 1526 1527 err = mptcp_subflow_create_socket(sk, loc->family, &sf); 1528 if (err) 1529 goto err_out; 1530 1531 ssk = sf->sk; 1532 subflow = mptcp_subflow_ctx(ssk); 1533 do { 1534 get_random_bytes(&subflow->local_nonce, sizeof(u32)); 1535 } while (!subflow->local_nonce); 1536 1537 if (local_id) 1538 subflow_set_local_id(subflow, local_id); 1539 1540 mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id, 1541 &flags, &ifindex); 1542 subflow->remote_key_valid = 1; 1543 subflow->remote_key = msk->remote_key; 1544 subflow->local_key = msk->local_key; 1545 subflow->token = msk->token; 1546 mptcp_info2sockaddr(loc, &addr, ssk->sk_family); 1547 1548 addrlen = sizeof(struct sockaddr_in); 1549 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1550 if (addr.ss_family == AF_INET6) 1551 addrlen = sizeof(struct sockaddr_in6); 1552 #endif 1553 mptcp_sockopt_sync(msk, ssk); 1554 1555 ssk->sk_bound_dev_if = ifindex; 1556 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); 1557 if (err) 1558 goto failed; 1559 1560 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); 1561 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk, 1562 remote_token, local_id, remote_id); 1563 subflow->remote_token = remote_token; 1564 WRITE_ONCE(subflow->remote_id, remote_id); 1565 subflow->request_join = 1; 1566 subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1567 subflow->subflow_id = msk->subflow_id++; 1568 mptcp_info2sockaddr(remote, &addr, ssk->sk_family); 1569 1570 sock_hold(ssk); 1571 list_add_tail(&subflow->node, &msk->conn_list); 1572 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); 1573 if (err && err != -EINPROGRESS) 1574 goto failed_unlink; 1575 
	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	WRITE_ONCE(msk->allow_infinite_fallback, false);
	mptcp_stop_tout_timer(sk);
	return 0;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);

err_out:
	/* we account subflows before the creation, and these failures will not
	 * be caught by sk_state_change()
	 */
	mptcp_pm_close_subflow(msk);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	err = security_mptcp_add_subflow(sk, sf->sk);
	if (err)
		goto release_ssk;

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 * Update ns_tracker to current stack trace and refcounted tracker.
	 */
	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");

release_ssk:
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;
	WRITE_ONCE(ctx->local_id, -1);

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}

void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct request_sock *req, *head, *tail;
	struct mptcp_subflow_context *subflow;
	struct sock *sk, *ssk;

	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
	 * Splice the req list, so that accept() can not reach the pending ssk after
	 * the listener socket is released below.
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the subflow is still unaccepted and the msk has been
		 * orphaned, keep the ctx alive; it will be freed by
		 * __mptcp_close_ssk()
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * so explicitly trigger the socket close machinery here
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(); the MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, the id is always 0 */
		subflow_set_local_id(new_ctx, 0);
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

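/* proto callbacks overridden for subflow sockets; installed into
 * tcp_prot_override / tcpv6_prot_override by mptcp_subflow_init() below.
 */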
static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow in
	 * the napi queue. To respect locking, only the same CPU that originated
	 * the action can touch the list. mptcp_napi_poll will take care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}

static int tcp_abort_override(struct sock *ssk, int err)
{
	/* closing a listener subflow requires a great deal of care:
	 * keep it simple and just prevent such an operation
	 */
	if (inet_sk_state_load(ssk) == TCP_LISTEN)
		return -EINVAL;

	return tcp_abort(ssk, err);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

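/* Boot-time setup for MPTCP subflows: clone the TCP request_sock ops,
 * af_ops and proto structures, plug in the subflow-specific callbacks
 * defined above, and register the "mptcp" ULP. Failures here are fatal,
 * hence the panic() calls.
 */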
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;
	tcp_prot_override.diag_destroy = tcp_abort_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. This should not change
	 * in the future, but better make sure we are warned if it is no longer
	 * the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
	tcpv6_prot_override.diag_destroy = tcp_abort_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}