1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <crypto/sha2.h> 11 #include <net/tcp.h> 12 #include <net/mptcp.h> 13 #include "protocol.h" 14 #include "mib.h" 15 16 #include <trace/events/mptcp.h> 17 18 static bool mptcp_cap_flag_sha256(u8 flags) 19 { 20 return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256; 21 } 22 23 static void mptcp_parse_option(const struct sk_buff *skb, 24 const unsigned char *ptr, int opsize, 25 struct mptcp_options_received *mp_opt) 26 { 27 u8 subtype = *ptr >> 4; 28 int expected_opsize; 29 u8 version; 30 u8 flags; 31 u8 i; 32 33 switch (subtype) { 34 case MPTCPOPT_MP_CAPABLE: 35 /* strict size checking */ 36 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 37 if (skb->len > tcp_hdr(skb)->doff << 2) 38 expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA; 39 else 40 expected_opsize = TCPOLEN_MPTCP_MPC_ACK; 41 } else { 42 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) 43 expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK; 44 else 45 expected_opsize = TCPOLEN_MPTCP_MPC_SYN; 46 } 47 48 /* Cfr RFC 8684 Section 3.3.0: 49 * If a checksum is present but its use had 50 * not been negotiated in the MP_CAPABLE handshake, the receiver MUST 51 * close the subflow with a RST, as it is not behaving as negotiated. 52 * If a checksum is not present when its use has been negotiated, the 53 * receiver MUST close the subflow with a RST, as it is considered 54 * broken 55 * We parse even option with mismatching csum presence, so that 56 * later in subflow_data_ready we can trigger the reset. 57 */ 58 if (opsize != expected_opsize && 59 (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA || 60 opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM)) 61 break; 62 63 /* try to be gentle vs future versions on the initial syn */ 64 version = *ptr++ & MPTCP_VERSION_MASK; 65 if (opsize != TCPOLEN_MPTCP_MPC_SYN) { 66 if (version != MPTCP_SUPPORTED_VERSION) 67 break; 68 } else if (version < MPTCP_SUPPORTED_VERSION) { 69 break; 70 } 71 72 flags = *ptr++; 73 if (!mptcp_cap_flag_sha256(flags) || 74 (flags & MPTCP_CAP_EXTENSIBILITY)) 75 break; 76 77 /* RFC 6824, Section 3.1: 78 * "For the Checksum Required bit (labeled "A"), if either 79 * host requires the use of checksums, checksums MUST be used. 80 * In other words, the only way for checksums not to be used 81 * is if both hosts in their SYNs set A=0." 82 */ 83 if (flags & MPTCP_CAP_CHECKSUM_REQD) 84 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; 85 86 mp_opt->deny_join_id0 = !!(flags & MPTCP_CAP_DENY_JOIN_ID0); 87 88 mp_opt->suboptions |= OPTIONS_MPTCP_MPC; 89 if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) { 90 mp_opt->sndr_key = get_unaligned_be64(ptr); 91 ptr += 8; 92 } 93 if (opsize >= TCPOLEN_MPTCP_MPC_ACK) { 94 mp_opt->rcvr_key = get_unaligned_be64(ptr); 95 ptr += 8; 96 } 97 if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) { 98 /* Section 3.1.: 99 * "the data parameters in a MP_CAPABLE are semantically 100 * equivalent to those in a DSS option and can be used 101 * interchangeably." 102 */ 103 mp_opt->suboptions |= OPTION_MPTCP_DSS; 104 mp_opt->use_map = 1; 105 mp_opt->mpc_map = 1; 106 mp_opt->data_len = get_unaligned_be16(ptr); 107 ptr += 2; 108 } 109 if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) { 110 mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); 111 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; 112 ptr += 2; 113 } 114 pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u", 115 version, flags, opsize, mp_opt->sndr_key, 116 mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum); 117 break; 118 119 case MPTCPOPT_MP_JOIN: 120 mp_opt->suboptions |= OPTIONS_MPTCP_MPJ; 121 if (opsize == TCPOLEN_MPTCP_MPJ_SYN) { 122 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 123 mp_opt->join_id = *ptr++; 124 mp_opt->token = get_unaligned_be32(ptr); 125 ptr += 4; 126 mp_opt->nonce = get_unaligned_be32(ptr); 127 ptr += 4; 128 pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u", 129 mp_opt->backup, mp_opt->join_id, 130 mp_opt->token, mp_opt->nonce); 131 } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) { 132 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 133 mp_opt->join_id = *ptr++; 134 mp_opt->thmac = get_unaligned_be64(ptr); 135 ptr += 8; 136 mp_opt->nonce = get_unaligned_be32(ptr); 137 ptr += 4; 138 pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u", 139 mp_opt->backup, mp_opt->join_id, 140 mp_opt->thmac, mp_opt->nonce); 141 } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) { 142 ptr += 2; 143 memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); 144 pr_debug("MP_JOIN hmac"); 145 } else { 146 mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ; 147 } 148 break; 149 150 case MPTCPOPT_DSS: 151 pr_debug("DSS"); 152 ptr++; 153 154 /* we must clear 'mpc_map' be able to detect MP_CAPABLE 155 * map vs DSS map in mptcp_incoming_options(), and reconstruct 156 * map info accordingly 157 */ 158 mp_opt->mpc_map = 0; 159 flags = (*ptr++) & MPTCP_DSS_FLAG_MASK; 160 mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0; 161 mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0; 162 mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0; 163 mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0; 164 mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK); 165 166 pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d", 167 mp_opt->data_fin, mp_opt->dsn64, 168 mp_opt->use_map, mp_opt->ack64, 169 mp_opt->use_ack); 170 171 expected_opsize = TCPOLEN_MPTCP_DSS_BASE; 172 173 if (mp_opt->use_ack) { 174 if (mp_opt->ack64) 175 expected_opsize += TCPOLEN_MPTCP_DSS_ACK64; 176 else 177 expected_opsize += TCPOLEN_MPTCP_DSS_ACK32; 178 } 179 180 if (mp_opt->use_map) { 181 if (mp_opt->dsn64) 182 expected_opsize += TCPOLEN_MPTCP_DSS_MAP64; 183 else 184 expected_opsize += TCPOLEN_MPTCP_DSS_MAP32; 185 } 186 187 /* Always parse any csum presence combination, we will enforce 188 * RFC 8684 Section 3.3.0 checks later in subflow_data_ready 189 */ 190 if (opsize != expected_opsize && 191 opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) 192 break; 193 194 mp_opt->suboptions |= OPTION_MPTCP_DSS; 195 if (mp_opt->use_ack) { 196 if (mp_opt->ack64) { 197 mp_opt->data_ack = get_unaligned_be64(ptr); 198 ptr += 8; 199 } else { 200 mp_opt->data_ack = get_unaligned_be32(ptr); 201 ptr += 4; 202 } 203 204 pr_debug("data_ack=%llu", mp_opt->data_ack); 205 } 206 207 if (mp_opt->use_map) { 208 if (mp_opt->dsn64) { 209 mp_opt->data_seq = get_unaligned_be64(ptr); 210 ptr += 8; 211 } else { 212 mp_opt->data_seq = get_unaligned_be32(ptr); 213 ptr += 4; 214 } 215 216 mp_opt->subflow_seq = get_unaligned_be32(ptr); 217 ptr += 4; 218 219 mp_opt->data_len = get_unaligned_be16(ptr); 220 ptr += 2; 221 222 if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) { 223 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; 224 mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); 225 ptr += 2; 226 } 227 228 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", 229 mp_opt->data_seq, mp_opt->subflow_seq, 230 mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD), 231 mp_opt->csum); 232 } 233 234 break; 235 236 case MPTCPOPT_ADD_ADDR: 237 mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO; 238 if (!mp_opt->echo) { 239 if (opsize == TCPOLEN_MPTCP_ADD_ADDR || 240 opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT) 241 mp_opt->addr.family = AF_INET; 242 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 243 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 || 244 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT) 245 mp_opt->addr.family = AF_INET6; 246 #endif 247 else 248 break; 249 } else { 250 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE || 251 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) 252 mp_opt->addr.family = AF_INET; 253 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 254 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE || 255 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) 256 mp_opt->addr.family = AF_INET6; 257 #endif 258 else 259 break; 260 } 261 262 mp_opt->suboptions |= OPTION_MPTCP_ADD_ADDR; 263 mp_opt->addr.id = *ptr++; 264 mp_opt->addr.port = 0; 265 mp_opt->ahmac = 0; 266 if (mp_opt->addr.family == AF_INET) { 267 memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4); 268 ptr += 4; 269 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT || 270 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) { 271 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 272 ptr += 2; 273 } 274 } 275 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 276 else { 277 memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16); 278 ptr += 16; 279 if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT || 280 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) { 281 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 282 ptr += 2; 283 } 284 } 285 #endif 286 if (!mp_opt->echo) { 287 mp_opt->ahmac = get_unaligned_be64(ptr); 288 ptr += 8; 289 } 290 pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", 291 (mp_opt->addr.family == AF_INET6) ? "6" : "", 292 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port)); 293 break; 294 295 case MPTCPOPT_RM_ADDR: 296 if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 || 297 opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX) 298 break; 299 300 ptr++; 301 302 mp_opt->suboptions |= OPTION_MPTCP_RM_ADDR; 303 mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; 304 for (i = 0; i < mp_opt->rm_list.nr; i++) 305 mp_opt->rm_list.ids[i] = *ptr++; 306 pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr); 307 break; 308 309 case MPTCPOPT_MP_PRIO: 310 if (opsize != TCPOLEN_MPTCP_PRIO) 311 break; 312 313 mp_opt->suboptions |= OPTION_MPTCP_PRIO; 314 mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; 315 pr_debug("MP_PRIO: prio=%d", mp_opt->backup); 316 break; 317 318 case MPTCPOPT_MP_FASTCLOSE: 319 if (opsize != TCPOLEN_MPTCP_FASTCLOSE) 320 break; 321 322 ptr += 2; 323 mp_opt->rcvr_key = get_unaligned_be64(ptr); 324 ptr += 8; 325 mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE; 326 break; 327 328 case MPTCPOPT_RST: 329 if (opsize != TCPOLEN_MPTCP_RST) 330 break; 331 332 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) 333 break; 334 335 mp_opt->suboptions |= OPTION_MPTCP_RST; 336 flags = *ptr++; 337 mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; 338 mp_opt->reset_reason = *ptr; 339 break; 340 341 case MPTCPOPT_MP_FAIL: 342 if (opsize != TCPOLEN_MPTCP_FAIL) 343 break; 344 345 ptr += 2; 346 mp_opt->suboptions |= OPTION_MPTCP_FAIL; 347 mp_opt->fail_seq = get_unaligned_be64(ptr); 348 pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq); 349 break; 350 351 default: 352 break; 353 } 354 } 355 356 void mptcp_get_options(const struct sock *sk, 357 const struct sk_buff *skb, 358 struct mptcp_options_received *mp_opt) 359 { 360 const struct tcphdr *th = tcp_hdr(skb); 361 const unsigned char *ptr; 362 int length; 363 364 /* initialize option status */ 365 mp_opt->suboptions = 0; 366 367 length = (th->doff * 4) - sizeof(struct tcphdr); 368 ptr = (const unsigned char *)(th + 1); 369 370 while (length > 0) { 371 int opcode = *ptr++; 372 int opsize; 373 374 switch (opcode) { 375 case TCPOPT_EOL: 376 return; 377 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 378 length--; 379 continue; 380 default: 381 if (length < 2) 382 return; 383 opsize = *ptr++; 384 if (opsize < 2) /* "silly options" */ 385 return; 386 if (opsize > length) 387 return; /* don't parse partial options */ 388 if (opcode == TCPOPT_MPTCP) 389 mptcp_parse_option(skb, ptr, opsize, mp_opt); 390 ptr += opsize - 2; 391 length -= opsize; 392 } 393 } 394 } 395 396 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, 397 unsigned int *size, struct mptcp_out_options *opts) 398 { 399 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 400 401 /* we will use snd_isn to detect first pkt [re]transmission 402 * in mptcp_established_options_mp() 403 */ 404 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; 405 if (subflow->request_mptcp) { 406 opts->suboptions = OPTION_MPTCP_MPC_SYN; 407 opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk)); 408 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk)); 409 *size = TCPOLEN_MPTCP_MPC_SYN; 410 return true; 411 } else if (subflow->request_join) { 412 pr_debug("remote_token=%u, nonce=%u", subflow->remote_token, 413 subflow->local_nonce); 414 opts->suboptions = OPTION_MPTCP_MPJ_SYN; 415 opts->join_id = subflow->local_id; 416 opts->token = subflow->remote_token; 417 opts->nonce = subflow->local_nonce; 418 opts->backup = subflow->request_bkup; 419 *size = TCPOLEN_MPTCP_MPJ_SYN; 420 return true; 421 } 422 return false; 423 } 424 425 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 426 * TCP can't schedule delack timer before the subflow is fully established. 427 * MPTCP uses the delack timer to do 3rd ack retransmissions 428 */ 429 static void schedule_3rdack_retransmission(struct sock *sk) 430 { 431 struct inet_connection_sock *icsk = inet_csk(sk); 432 struct tcp_sock *tp = tcp_sk(sk); 433 unsigned long timeout; 434 435 /* reschedule with a timeout above RTT, as we must look only for drop */ 436 if (tp->srtt_us) 437 timeout = tp->srtt_us << 1; 438 else 439 timeout = TCP_TIMEOUT_INIT; 440 441 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 442 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 443 icsk->icsk_ack.timeout = timeout; 444 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 445 } 446 447 static void clear_3rdack_retransmission(struct sock *sk) 448 { 449 struct inet_connection_sock *icsk = inet_csk(sk); 450 451 sk_stop_timer(sk, &icsk->icsk_delack_timer); 452 icsk->icsk_ack.timeout = 0; 453 icsk->icsk_ack.ato = 0; 454 icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER); 455 } 456 457 static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, 458 bool snd_data_fin_enable, 459 unsigned int *size, 460 unsigned int remaining, 461 struct mptcp_out_options *opts) 462 { 463 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 464 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 465 struct mptcp_ext *mpext; 466 unsigned int data_len; 467 u8 len; 468 469 /* When skb is not available, we better over-estimate the emitted 470 * options len. A full DSS option (28 bytes) is longer than 471 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so 472 * tell the caller to defer the estimate to 473 * mptcp_established_options_dss(), which will reserve enough space. 474 */ 475 if (!skb) 476 return false; 477 478 /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */ 479 if (subflow->fully_established || snd_data_fin_enable || 480 subflow->snd_isn != TCP_SKB_CB(skb)->seq || 481 sk->sk_state != TCP_ESTABLISHED) 482 return false; 483 484 if (subflow->mp_capable) { 485 mpext = mptcp_get_ext(skb); 486 data_len = mpext ? mpext->data_len : 0; 487 488 /* we will check ext_copy.data_len in mptcp_write_options() to 489 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and 490 * TCPOLEN_MPTCP_MPC_ACK 491 */ 492 opts->ext_copy.data_len = data_len; 493 opts->suboptions = OPTION_MPTCP_MPC_ACK; 494 opts->sndr_key = subflow->local_key; 495 opts->rcvr_key = subflow->remote_key; 496 opts->csum_reqd = READ_ONCE(msk->csum_enabled); 497 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk)); 498 499 /* Section 3.1. 500 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK 501 * packets that start the first subflow of an MPTCP connection, 502 * as well as the first packet that carries data 503 */ 504 if (data_len > 0) { 505 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 506 if (opts->csum_reqd) { 507 /* we need to propagate more info to csum the pseudo hdr */ 508 opts->ext_copy.data_seq = mpext->data_seq; 509 opts->ext_copy.subflow_seq = mpext->subflow_seq; 510 opts->ext_copy.csum = mpext->csum; 511 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 512 } 513 *size = ALIGN(len, 4); 514 } else { 515 *size = TCPOLEN_MPTCP_MPC_ACK; 516 } 517 518 pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d", 519 subflow, subflow->local_key, subflow->remote_key, 520 data_len); 521 522 return true; 523 } else if (subflow->mp_join) { 524 opts->suboptions = OPTION_MPTCP_MPJ_ACK; 525 memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN); 526 *size = TCPOLEN_MPTCP_MPJ_ACK; 527 pr_debug("subflow=%p", subflow); 528 529 schedule_3rdack_retransmission(sk); 530 return true; 531 } 532 return false; 533 } 534 535 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, 536 struct sk_buff *skb, struct mptcp_ext *ext) 537 { 538 /* The write_seq value has already been incremented, so the actual 539 * sequence number for the DATA_FIN is one less. 540 */ 541 u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1; 542 543 if (!ext->use_map || !skb->len) { 544 /* RFC6824 requires a DSS mapping with specific values 545 * if DATA_FIN is set but no data payload is mapped 546 */ 547 ext->data_fin = 1; 548 ext->use_map = 1; 549 ext->dsn64 = 1; 550 ext->data_seq = data_fin_tx_seq; 551 ext->subflow_seq = 0; 552 ext->data_len = 1; 553 } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) { 554 /* If there's an existing DSS mapping and it is the 555 * final mapping, DATA_FIN consumes 1 additional byte of 556 * mapping space. 557 */ 558 ext->data_fin = 1; 559 ext->data_len++; 560 } 561 } 562 563 static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, 564 bool snd_data_fin_enable, 565 unsigned int *size, 566 unsigned int remaining, 567 struct mptcp_out_options *opts) 568 { 569 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 570 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 571 unsigned int dss_size = 0; 572 struct mptcp_ext *mpext; 573 unsigned int ack_size; 574 bool ret = false; 575 u64 ack_seq; 576 577 opts->csum_reqd = READ_ONCE(msk->csum_enabled); 578 mpext = skb ? mptcp_get_ext(skb) : NULL; 579 580 if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) { 581 unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64; 582 583 if (mpext) { 584 if (opts->csum_reqd) 585 map_size += TCPOLEN_MPTCP_DSS_CHECKSUM; 586 587 opts->ext_copy = *mpext; 588 } 589 590 remaining -= map_size; 591 dss_size = map_size; 592 if (skb && snd_data_fin_enable) 593 mptcp_write_data_fin(subflow, skb, &opts->ext_copy); 594 opts->suboptions = OPTION_MPTCP_DSS; 595 ret = true; 596 } 597 598 /* passive sockets msk will set the 'can_ack' after accept(), even 599 * if the first subflow may have the already the remote key handy 600 */ 601 opts->ext_copy.use_ack = 0; 602 if (!READ_ONCE(msk->can_ack)) { 603 *size = ALIGN(dss_size, 4); 604 return ret; 605 } 606 607 ack_seq = READ_ONCE(msk->ack_seq); 608 if (READ_ONCE(msk->use_64bit_ack)) { 609 ack_size = TCPOLEN_MPTCP_DSS_ACK64; 610 opts->ext_copy.data_ack = ack_seq; 611 opts->ext_copy.ack64 = 1; 612 } else { 613 ack_size = TCPOLEN_MPTCP_DSS_ACK32; 614 opts->ext_copy.data_ack32 = (uint32_t)ack_seq; 615 opts->ext_copy.ack64 = 0; 616 } 617 opts->ext_copy.use_ack = 1; 618 opts->suboptions = OPTION_MPTCP_DSS; 619 WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk)); 620 621 /* Add kind/length/subtype/flag overhead if mapping is not populated */ 622 if (dss_size == 0) 623 ack_size += TCPOLEN_MPTCP_DSS_BASE; 624 625 dss_size += ack_size; 626 627 *size = ALIGN(dss_size, 4); 628 return true; 629 } 630 631 static u64 add_addr_generate_hmac(u64 key1, u64 key2, 632 struct mptcp_addr_info *addr) 633 { 634 u16 port = ntohs(addr->port); 635 u8 hmac[SHA256_DIGEST_SIZE]; 636 u8 msg[19]; 637 int i = 0; 638 639 msg[i++] = addr->id; 640 if (addr->family == AF_INET) { 641 memcpy(&msg[i], &addr->addr.s_addr, 4); 642 i += 4; 643 } 644 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 645 else if (addr->family == AF_INET6) { 646 memcpy(&msg[i], &addr->addr6.s6_addr, 16); 647 i += 16; 648 } 649 #endif 650 msg[i++] = port >> 8; 651 msg[i++] = port & 0xFF; 652 653 mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac); 654 655 return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 656 } 657 658 static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb, 659 unsigned int *size, 660 unsigned int remaining, 661 struct mptcp_out_options *opts) 662 { 663 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 664 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 665 bool drop_other_suboptions = false; 666 unsigned int opt_size = *size; 667 bool echo; 668 bool port; 669 int len; 670 671 /* add addr will strip the existing options, be sure to avoid breaking 672 * MPC/MPJ handshakes 673 */ 674 if (!mptcp_pm_should_add_signal(msk) || 675 (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) || 676 !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr, 677 &echo, &port, &drop_other_suboptions)) 678 return false; 679 680 if (drop_other_suboptions) 681 remaining += opt_size; 682 len = mptcp_add_addr_len(opts->addr.family, echo, port); 683 if (remaining < len) 684 return false; 685 686 *size = len; 687 if (drop_other_suboptions) { 688 pr_debug("drop other suboptions"); 689 opts->suboptions = 0; 690 691 /* note that e.g. DSS could have written into the memory 692 * aliased by ahmac, we must reset the field here 693 * to avoid appending the hmac even for ADD_ADDR echo 694 * options 695 */ 696 opts->ahmac = 0; 697 *size -= opt_size; 698 } 699 opts->suboptions |= OPTION_MPTCP_ADD_ADDR; 700 if (!echo) { 701 opts->ahmac = add_addr_generate_hmac(msk->local_key, 702 msk->remote_key, 703 &opts->addr); 704 } 705 pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", 706 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); 707 708 return true; 709 } 710 711 static bool mptcp_established_options_rm_addr(struct sock *sk, 712 unsigned int *size, 713 unsigned int remaining, 714 struct mptcp_out_options *opts) 715 { 716 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 717 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 718 struct mptcp_rm_list rm_list; 719 int i, len; 720 721 if (!mptcp_pm_should_rm_signal(msk) || 722 !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list))) 723 return false; 724 725 len = mptcp_rm_addr_len(&rm_list); 726 if (len < 0) 727 return false; 728 if (remaining < len) 729 return false; 730 731 *size = len; 732 opts->suboptions |= OPTION_MPTCP_RM_ADDR; 733 opts->rm_list = rm_list; 734 735 for (i = 0; i < opts->rm_list.nr; i++) 736 pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); 737 738 return true; 739 } 740 741 static bool mptcp_established_options_mp_prio(struct sock *sk, 742 unsigned int *size, 743 unsigned int remaining, 744 struct mptcp_out_options *opts) 745 { 746 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 747 748 /* can't send MP_PRIO with MPC, as they share the same option space: 749 * 'backup'. Also it makes no sense at all 750 */ 751 if (!subflow->send_mp_prio || 752 ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | 753 OPTION_MPTCP_MPC_ACK) & opts->suboptions)) 754 return false; 755 756 /* account for the trailing 'nop' option */ 757 if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN) 758 return false; 759 760 *size = TCPOLEN_MPTCP_PRIO_ALIGN; 761 opts->suboptions |= OPTION_MPTCP_PRIO; 762 opts->backup = subflow->request_bkup; 763 764 pr_debug("prio=%d", opts->backup); 765 766 return true; 767 } 768 769 static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb, 770 unsigned int *size, 771 unsigned int remaining, 772 struct mptcp_out_options *opts) 773 { 774 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 775 776 if (remaining < TCPOLEN_MPTCP_RST) 777 return false; 778 779 *size = TCPOLEN_MPTCP_RST; 780 opts->suboptions |= OPTION_MPTCP_RST; 781 opts->reset_transient = subflow->reset_transient; 782 opts->reset_reason = subflow->reset_reason; 783 784 return true; 785 } 786 787 static bool mptcp_established_options_mp_fail(struct sock *sk, 788 unsigned int *size, 789 unsigned int remaining, 790 struct mptcp_out_options *opts) 791 { 792 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 793 794 if (likely(!subflow->send_mp_fail)) 795 return false; 796 797 if (remaining < TCPOLEN_MPTCP_FAIL) 798 return false; 799 800 *size = TCPOLEN_MPTCP_FAIL; 801 opts->suboptions |= OPTION_MPTCP_FAIL; 802 opts->fail_seq = subflow->map_seq; 803 804 pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq); 805 806 return true; 807 } 808 809 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, 810 unsigned int *size, unsigned int remaining, 811 struct mptcp_out_options *opts) 812 { 813 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 814 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 815 unsigned int opt_size = 0; 816 bool snd_data_fin; 817 bool ret = false; 818 819 opts->suboptions = 0; 820 821 if (unlikely(__mptcp_check_fallback(msk))) 822 return false; 823 824 if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) { 825 if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) { 826 *size += opt_size; 827 remaining -= opt_size; 828 } 829 if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) { 830 *size += opt_size; 831 remaining -= opt_size; 832 } 833 return true; 834 } 835 836 snd_data_fin = mptcp_data_fin_enabled(msk); 837 if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts)) 838 ret = true; 839 else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) { 840 ret = true; 841 if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) { 842 *size += opt_size; 843 remaining -= opt_size; 844 return true; 845 } 846 } 847 848 /* we reserved enough space for the above options, and exceeding the 849 * TCP option space would be fatal 850 */ 851 if (WARN_ON_ONCE(opt_size > remaining)) 852 return false; 853 854 *size += opt_size; 855 remaining -= opt_size; 856 if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) { 857 *size += opt_size; 858 remaining -= opt_size; 859 ret = true; 860 } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) { 861 *size += opt_size; 862 remaining -= opt_size; 863 ret = true; 864 } 865 866 if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) { 867 *size += opt_size; 868 remaining -= opt_size; 869 ret = true; 870 } 871 872 return ret; 873 } 874 875 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, 876 struct mptcp_out_options *opts) 877 { 878 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 879 880 if (subflow_req->mp_capable) { 881 opts->suboptions = OPTION_MPTCP_MPC_SYNACK; 882 opts->sndr_key = subflow_req->local_key; 883 opts->csum_reqd = subflow_req->csum_reqd; 884 opts->allow_join_id0 = subflow_req->allow_join_id0; 885 *size = TCPOLEN_MPTCP_MPC_SYNACK; 886 pr_debug("subflow_req=%p, local_key=%llu", 887 subflow_req, subflow_req->local_key); 888 return true; 889 } else if (subflow_req->mp_join) { 890 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK; 891 opts->backup = subflow_req->backup; 892 opts->join_id = subflow_req->local_id; 893 opts->thmac = subflow_req->thmac; 894 opts->nonce = subflow_req->local_nonce; 895 pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u", 896 subflow_req, opts->backup, opts->join_id, 897 opts->thmac, opts->nonce); 898 *size = TCPOLEN_MPTCP_MPJ_SYNACK; 899 return true; 900 } 901 return false; 902 } 903 904 static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, 905 struct mptcp_subflow_context *subflow, 906 struct sk_buff *skb, 907 struct mptcp_options_received *mp_opt) 908 { 909 /* here we can process OoO, in-window pkts, only in-sequence 4th ack 910 * will make the subflow fully established 911 */ 912 if (likely(subflow->fully_established)) { 913 /* on passive sockets, check for 3rd ack retransmission 914 * note that msk is always set by subflow_syn_recv_sock() 915 * for mp_join subflows 916 */ 917 if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 && 918 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && 919 subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) && 920 READ_ONCE(msk->pm.server_side)) 921 tcp_send_ack(ssk); 922 goto fully_established; 923 } 924 925 /* we must process OoO packets before the first subflow is fully 926 * established. OoO packets are instead a protocol violation 927 * for MP_JOIN subflows as the peer must not send any data 928 * before receiving the forth ack - cfr. RFC 8684 section 3.2. 929 */ 930 if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) { 931 if (subflow->mp_join) 932 goto reset; 933 return subflow->mp_capable; 934 } 935 936 if (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) || 937 ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo)) { 938 /* subflows are fully established as soon as we get any 939 * additional ack, including ADD_ADDR. 940 */ 941 subflow->fully_established = 1; 942 WRITE_ONCE(msk->fully_established, true); 943 goto fully_established; 944 } 945 946 /* If the first established packet does not contain MP_CAPABLE + data 947 * then fallback to TCP. Fallback scenarios requires a reset for 948 * MP_JOIN subflows. 949 */ 950 if (!(mp_opt->suboptions & OPTIONS_MPTCP_MPC)) { 951 if (subflow->mp_join) 952 goto reset; 953 subflow->mp_capable = 0; 954 pr_fallback(msk); 955 __mptcp_do_fallback(msk); 956 return false; 957 } 958 959 if (mp_opt->deny_join_id0) 960 WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 961 962 if (unlikely(!READ_ONCE(msk->pm.server_side))) 963 pr_warn_once("bogus mpc option on established client sk"); 964 mptcp_subflow_fully_established(subflow, mp_opt); 965 966 fully_established: 967 /* if the subflow is not already linked into the conn_list, we can't 968 * notify the PM: this subflow is still on the listener queue 969 * and the PM possibly acquiring the subflow lock could race with 970 * the listener close 971 */ 972 if (likely(subflow->pm_notified) || list_empty(&subflow->node)) 973 return true; 974 975 subflow->pm_notified = 1; 976 if (subflow->mp_join) { 977 clear_3rdack_retransmission(ssk); 978 mptcp_pm_subflow_established(msk); 979 } else { 980 mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC); 981 } 982 return true; 983 984 reset: 985 mptcp_subflow_reset(ssk); 986 return false; 987 } 988 989 u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq) 990 { 991 u32 old_seq32, cur_seq32; 992 993 old_seq32 = (u32)old_seq; 994 cur_seq32 = (u32)cur_seq; 995 cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32; 996 if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32))) 997 return cur_seq + (1LL << 32); 998 999 /* reverse wrap could happen, too */ 1000 if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32))) 1001 return cur_seq - (1LL << 32); 1002 return cur_seq; 1003 } 1004 1005 static void ack_update_msk(struct mptcp_sock *msk, 1006 struct sock *ssk, 1007 struct mptcp_options_received *mp_opt) 1008 { 1009 u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt); 1010 struct sock *sk = (struct sock *)msk; 1011 u64 old_snd_una; 1012 1013 mptcp_data_lock(sk); 1014 1015 /* avoid ack expansion on update conflict, to reduce the risk of 1016 * wrongly expanding to a future ack sequence number, which is way 1017 * more dangerous than missing an ack 1018 */ 1019 old_snd_una = msk->snd_una; 1020 new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64); 1021 1022 /* ACK for data not even sent yet? Ignore.*/ 1023 if (unlikely(after64(new_snd_una, snd_nxt))) 1024 new_snd_una = old_snd_una; 1025 1026 new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd; 1027 1028 if (after64(new_wnd_end, msk->wnd_end)) 1029 msk->wnd_end = new_wnd_end; 1030 1031 /* this assumes mptcp_incoming_options() is invoked after tcp_ack() */ 1032 if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt))) 1033 __mptcp_check_push(sk, ssk); 1034 1035 if (after64(new_snd_una, old_snd_una)) { 1036 msk->snd_una = new_snd_una; 1037 __mptcp_data_acked(sk); 1038 } 1039 mptcp_data_unlock(sk); 1040 1041 trace_ack_update_msk(mp_opt->data_ack, 1042 old_snd_una, new_snd_una, 1043 new_wnd_end, msk->wnd_end); 1044 } 1045 1046 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit) 1047 { 1048 /* Skip if DATA_FIN was already received. 1049 * If updating simultaneously with the recvmsg loop, values 1050 * should match. If they mismatch, the peer is misbehaving and 1051 * we will prefer the most recent information. 1052 */ 1053 if (READ_ONCE(msk->rcv_data_fin)) 1054 return false; 1055 1056 WRITE_ONCE(msk->rcv_data_fin_seq, 1057 mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit)); 1058 WRITE_ONCE(msk->rcv_data_fin, 1); 1059 1060 return true; 1061 } 1062 1063 static bool add_addr_hmac_valid(struct mptcp_sock *msk, 1064 struct mptcp_options_received *mp_opt) 1065 { 1066 u64 hmac = 0; 1067 1068 if (mp_opt->echo) 1069 return true; 1070 1071 hmac = add_addr_generate_hmac(msk->remote_key, 1072 msk->local_key, 1073 &mp_opt->addr); 1074 1075 pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n", 1076 msk, (unsigned long long)hmac, 1077 (unsigned long long)mp_opt->ahmac); 1078 1079 return hmac == mp_opt->ahmac; 1080 } 1081 1082 /* Return false if a subflow has been reset, else return true */ 1083 bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) 1084 { 1085 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1086 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 1087 struct mptcp_options_received mp_opt; 1088 struct mptcp_ext *mpext; 1089 1090 if (__mptcp_check_fallback(msk)) { 1091 /* Keep it simple and unconditionally trigger send data cleanup and 1092 * pending queue spooling. We will need to acquire the data lock 1093 * for more accurate checks, and once the lock is acquired, such 1094 * helpers are cheap. 1095 */ 1096 mptcp_data_lock(subflow->conn); 1097 if (sk_stream_memory_free(sk)) 1098 __mptcp_check_push(subflow->conn, sk); 1099 __mptcp_data_acked(subflow->conn); 1100 mptcp_data_unlock(subflow->conn); 1101 return true; 1102 } 1103 1104 mptcp_get_options(sk, skb, &mp_opt); 1105 1106 /* The subflow can be in close state only if check_fully_established() 1107 * just sent a reset. If so, tell the caller to ignore the current packet. 1108 */ 1109 if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) 1110 return sk->sk_state != TCP_CLOSE; 1111 1112 if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) { 1113 if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) && 1114 msk->local_key == mp_opt.rcvr_key) { 1115 WRITE_ONCE(msk->rcv_fastclose, true); 1116 mptcp_schedule_work((struct sock *)msk); 1117 } 1118 1119 if ((mp_opt.suboptions & OPTION_MPTCP_ADD_ADDR) && 1120 add_addr_hmac_valid(msk, &mp_opt)) { 1121 if (!mp_opt.echo) { 1122 mptcp_pm_add_addr_received(msk, &mp_opt.addr); 1123 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR); 1124 } else { 1125 mptcp_pm_add_addr_echoed(msk, &mp_opt.addr); 1126 mptcp_pm_del_add_timer(msk, &mp_opt.addr, true); 1127 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD); 1128 } 1129 1130 if (mp_opt.addr.port) 1131 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD); 1132 } 1133 1134 if (mp_opt.suboptions & OPTION_MPTCP_RM_ADDR) 1135 mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list); 1136 1137 if (mp_opt.suboptions & OPTION_MPTCP_PRIO) { 1138 mptcp_pm_mp_prio_received(sk, mp_opt.backup); 1139 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX); 1140 } 1141 1142 if (mp_opt.suboptions & OPTION_MPTCP_FAIL) { 1143 mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq); 1144 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX); 1145 } 1146 1147 if (mp_opt.suboptions & OPTION_MPTCP_RST) { 1148 subflow->reset_seen = 1; 1149 subflow->reset_reason = mp_opt.reset_reason; 1150 subflow->reset_transient = mp_opt.reset_transient; 1151 } 1152 1153 if (!(mp_opt.suboptions & OPTION_MPTCP_DSS)) 1154 return true; 1155 } 1156 1157 /* we can't wait for recvmsg() to update the ack_seq, otherwise 1158 * monodirectional flows will stuck 1159 */ 1160 if (mp_opt.use_ack) 1161 ack_update_msk(msk, sk, &mp_opt); 1162 1163 /* Zero-data-length packets are dropped by the caller and not 1164 * propagated to the MPTCP layer, so the skb extension does not 1165 * need to be allocated or populated. DATA_FIN information, if 1166 * present, needs to be updated here before the skb is freed. 1167 */ 1168 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 1169 if (mp_opt.data_fin && mp_opt.data_len == 1 && 1170 mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) && 1171 schedule_work(&msk->work)) 1172 sock_hold(subflow->conn); 1173 1174 return true; 1175 } 1176 1177 mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 1178 if (!mpext) 1179 return true; 1180 1181 memset(mpext, 0, sizeof(*mpext)); 1182 1183 if (likely(mp_opt.use_map)) { 1184 if (mp_opt.mpc_map) { 1185 /* this is an MP_CAPABLE carrying MPTCP data 1186 * we know this map the first chunk of data 1187 */ 1188 mptcp_crypto_key_sha(subflow->remote_key, NULL, 1189 &mpext->data_seq); 1190 mpext->data_seq++; 1191 mpext->subflow_seq = 1; 1192 mpext->dsn64 = 1; 1193 mpext->mpc_map = 1; 1194 mpext->data_fin = 0; 1195 } else { 1196 mpext->data_seq = mp_opt.data_seq; 1197 mpext->subflow_seq = mp_opt.subflow_seq; 1198 mpext->dsn64 = mp_opt.dsn64; 1199 mpext->data_fin = mp_opt.data_fin; 1200 } 1201 mpext->data_len = mp_opt.data_len; 1202 mpext->use_map = 1; 1203 mpext->csum_reqd = !!(mp_opt.suboptions & OPTION_MPTCP_CSUMREQD); 1204 1205 if (mpext->csum_reqd) 1206 mpext->csum = mp_opt.csum; 1207 } 1208 1209 return true; 1210 } 1211 1212 static void mptcp_set_rwin(const struct tcp_sock *tp) 1213 { 1214 const struct sock *ssk = (const struct sock *)tp; 1215 const struct mptcp_subflow_context *subflow; 1216 struct mptcp_sock *msk; 1217 u64 ack_seq; 1218 1219 subflow = mptcp_subflow_ctx(ssk); 1220 msk = mptcp_sk(subflow->conn); 1221 1222 ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd; 1223 1224 if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent))) 1225 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 1226 } 1227 1228 static u16 mptcp_make_csum(const struct mptcp_ext *mpext) 1229 { 1230 struct csum_pseudo_header header; 1231 __wsum csum; 1232 1233 /* cfr RFC 8684 3.3.1.: 1234 * the data sequence number used in the pseudo-header is 1235 * always the 64-bit value, irrespective of what length is used in the 1236 * DSS option itself. 1237 */ 1238 header.data_seq = cpu_to_be64(mpext->data_seq); 1239 header.subflow_seq = htonl(mpext->subflow_seq); 1240 header.data_len = htons(mpext->data_len); 1241 header.csum = 0; 1242 1243 csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum)); 1244 return (__force u16)csum_fold(csum); 1245 } 1246 1247 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, 1248 struct mptcp_out_options *opts) 1249 { 1250 if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions)) { 1251 const struct sock *ssk = (const struct sock *)tp; 1252 struct mptcp_subflow_context *subflow; 1253 1254 subflow = mptcp_subflow_ctx(ssk); 1255 subflow->send_mp_fail = 0; 1256 1257 *ptr++ = mptcp_option(MPTCPOPT_MP_FAIL, 1258 TCPOLEN_MPTCP_FAIL, 1259 0, 0); 1260 put_unaligned_be64(opts->fail_seq, ptr); 1261 ptr += 2; 1262 } 1263 1264 /* RST is mutually exclusive with everything else */ 1265 if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) { 1266 *ptr++ = mptcp_option(MPTCPOPT_RST, 1267 TCPOLEN_MPTCP_RST, 1268 opts->reset_transient, 1269 opts->reset_reason); 1270 return; 1271 } 1272 1273 /* DSS, MPC, MPJ and ADD_ADDR are mutually exclusive, see 1274 * mptcp_established_options*() 1275 */ 1276 if (likely(OPTION_MPTCP_DSS & opts->suboptions)) { 1277 struct mptcp_ext *mpext = &opts->ext_copy; 1278 u8 len = TCPOLEN_MPTCP_DSS_BASE; 1279 u8 flags = 0; 1280 1281 if (mpext->use_ack) { 1282 flags = MPTCP_DSS_HAS_ACK; 1283 if (mpext->ack64) { 1284 len += TCPOLEN_MPTCP_DSS_ACK64; 1285 flags |= MPTCP_DSS_ACK64; 1286 } else { 1287 len += TCPOLEN_MPTCP_DSS_ACK32; 1288 } 1289 } 1290 1291 if (mpext->use_map) { 1292 len += TCPOLEN_MPTCP_DSS_MAP64; 1293 1294 /* Use only 64-bit mapping flags for now, add 1295 * support for optional 32-bit mappings later. 1296 */ 1297 flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64; 1298 if (mpext->data_fin) 1299 flags |= MPTCP_DSS_DATA_FIN; 1300 1301 if (opts->csum_reqd) 1302 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 1303 } 1304 1305 *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags); 1306 1307 if (mpext->use_ack) { 1308 if (mpext->ack64) { 1309 put_unaligned_be64(mpext->data_ack, ptr); 1310 ptr += 2; 1311 } else { 1312 put_unaligned_be32(mpext->data_ack32, ptr); 1313 ptr += 1; 1314 } 1315 } 1316 1317 if (mpext->use_map) { 1318 put_unaligned_be64(mpext->data_seq, ptr); 1319 ptr += 2; 1320 put_unaligned_be32(mpext->subflow_seq, ptr); 1321 ptr += 1; 1322 if (opts->csum_reqd) { 1323 put_unaligned_be32(mpext->data_len << 16 | 1324 mptcp_make_csum(mpext), ptr); 1325 } else { 1326 put_unaligned_be32(mpext->data_len << 16 | 1327 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1328 } 1329 } 1330 } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | 1331 OPTION_MPTCP_MPC_ACK) & opts->suboptions) { 1332 u8 len, flag = MPTCP_CAP_HMAC_SHA256; 1333 1334 if (OPTION_MPTCP_MPC_SYN & opts->suboptions) { 1335 len = TCPOLEN_MPTCP_MPC_SYN; 1336 } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) { 1337 len = TCPOLEN_MPTCP_MPC_SYNACK; 1338 } else if (opts->ext_copy.data_len) { 1339 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 1340 if (opts->csum_reqd) 1341 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 1342 } else { 1343 len = TCPOLEN_MPTCP_MPC_ACK; 1344 } 1345 1346 if (opts->csum_reqd) 1347 flag |= MPTCP_CAP_CHECKSUM_REQD; 1348 1349 if (!opts->allow_join_id0) 1350 flag |= MPTCP_CAP_DENY_JOIN_ID0; 1351 1352 *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len, 1353 MPTCP_SUPPORTED_VERSION, 1354 flag); 1355 1356 if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) & 1357 opts->suboptions)) 1358 goto mp_capable_done; 1359 1360 put_unaligned_be64(opts->sndr_key, ptr); 1361 ptr += 2; 1362 if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions)) 1363 goto mp_capable_done; 1364 1365 put_unaligned_be64(opts->rcvr_key, ptr); 1366 ptr += 2; 1367 if (!opts->ext_copy.data_len) 1368 goto mp_capable_done; 1369 1370 if (opts->csum_reqd) { 1371 put_unaligned_be32(opts->ext_copy.data_len << 16 | 1372 mptcp_make_csum(&opts->ext_copy), ptr); 1373 } else { 1374 put_unaligned_be32(opts->ext_copy.data_len << 16 | 1375 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1376 } 1377 ptr += 1; 1378 1379 /* MPC is additionally mutually exclusive with MP_PRIO */ 1380 goto mp_capable_done; 1381 } else if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) { 1382 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1383 TCPOLEN_MPTCP_MPJ_SYN, 1384 opts->backup, opts->join_id); 1385 put_unaligned_be32(opts->token, ptr); 1386 ptr += 1; 1387 put_unaligned_be32(opts->nonce, ptr); 1388 ptr += 1; 1389 } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) { 1390 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1391 TCPOLEN_MPTCP_MPJ_SYNACK, 1392 opts->backup, opts->join_id); 1393 put_unaligned_be64(opts->thmac, ptr); 1394 ptr += 2; 1395 put_unaligned_be32(opts->nonce, ptr); 1396 ptr += 1; 1397 } else if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) { 1398 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1399 TCPOLEN_MPTCP_MPJ_ACK, 0, 0); 1400 memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN); 1401 ptr += 5; 1402 } else if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) { 1403 u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE; 1404 u8 echo = MPTCP_ADDR_ECHO; 1405 1406 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1407 if (opts->addr.family == AF_INET6) 1408 len = TCPOLEN_MPTCP_ADD_ADDR6_BASE; 1409 #endif 1410 1411 if (opts->addr.port) 1412 len += TCPOLEN_MPTCP_PORT_LEN; 1413 1414 if (opts->ahmac) { 1415 len += sizeof(opts->ahmac); 1416 echo = 0; 1417 } 1418 1419 *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, 1420 len, echo, opts->addr.id); 1421 if (opts->addr.family == AF_INET) { 1422 memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4); 1423 ptr += 1; 1424 } 1425 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1426 else if (opts->addr.family == AF_INET6) { 1427 memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16); 1428 ptr += 4; 1429 } 1430 #endif 1431 1432 if (!opts->addr.port) { 1433 if (opts->ahmac) { 1434 put_unaligned_be64(opts->ahmac, ptr); 1435 ptr += 2; 1436 } 1437 } else { 1438 u16 port = ntohs(opts->addr.port); 1439 1440 if (opts->ahmac) { 1441 u8 *bptr = (u8 *)ptr; 1442 1443 put_unaligned_be16(port, bptr); 1444 bptr += 2; 1445 put_unaligned_be64(opts->ahmac, bptr); 1446 bptr += 8; 1447 put_unaligned_be16(TCPOPT_NOP << 8 | 1448 TCPOPT_NOP, bptr); 1449 1450 ptr += 3; 1451 } else { 1452 put_unaligned_be32(port << 16 | 1453 TCPOPT_NOP << 8 | 1454 TCPOPT_NOP, ptr); 1455 ptr += 1; 1456 } 1457 } 1458 } 1459 1460 if (OPTION_MPTCP_PRIO & opts->suboptions) { 1461 const struct sock *ssk = (const struct sock *)tp; 1462 struct mptcp_subflow_context *subflow; 1463 1464 subflow = mptcp_subflow_ctx(ssk); 1465 subflow->send_mp_prio = 0; 1466 1467 *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO, 1468 TCPOLEN_MPTCP_PRIO, 1469 opts->backup, TCPOPT_NOP); 1470 } 1471 1472 mp_capable_done: 1473 if (OPTION_MPTCP_RM_ADDR & opts->suboptions) { 1474 u8 i = 1; 1475 1476 *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR, 1477 TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr, 1478 0, opts->rm_list.ids[0]); 1479 1480 while (i < opts->rm_list.nr) { 1481 u8 id1, id2, id3, id4; 1482 1483 id1 = opts->rm_list.ids[i]; 1484 id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP; 1485 id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP; 1486 id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP; 1487 put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr); 1488 ptr += 1; 1489 i += 4; 1490 } 1491 } 1492 1493 if (tp) 1494 mptcp_set_rwin(tp); 1495 } 1496 1497 __be32 mptcp_get_reset_option(const struct sk_buff *skb) 1498 { 1499 const struct mptcp_ext *ext = mptcp_get_ext(skb); 1500 u8 flags, reason; 1501 1502 if (ext) { 1503 flags = ext->reset_transient; 1504 reason = ext->reset_reason; 1505 1506 return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST, 1507 flags, reason); 1508 } 1509 1510 return htonl(0u); 1511 } 1512 EXPORT_SYMBOL_GPL(mptcp_get_reset_option); 1513