1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <crypto/sha2.h> 11 #include <net/tcp.h> 12 #include <net/mptcp.h> 13 #include "protocol.h" 14 #include "mib.h" 15 16 #include <trace/events/mptcp.h> 17 18 static bool mptcp_cap_flag_sha256(u8 flags) 19 { 20 return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256; 21 } 22 23 static void mptcp_parse_option(const struct sk_buff *skb, 24 const unsigned char *ptr, int opsize, 25 struct mptcp_options_received *mp_opt) 26 { 27 u8 subtype = *ptr >> 4; 28 int expected_opsize; 29 u8 version; 30 u8 flags; 31 u8 i; 32 33 switch (subtype) { 34 case MPTCPOPT_MP_CAPABLE: 35 /* strict size checking */ 36 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 37 if (skb->len > tcp_hdr(skb)->doff << 2) 38 expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA; 39 else 40 expected_opsize = TCPOLEN_MPTCP_MPC_ACK; 41 } else { 42 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) 43 expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK; 44 else 45 expected_opsize = TCPOLEN_MPTCP_MPC_SYN; 46 } 47 if (opsize != expected_opsize) 48 break; 49 50 /* try to be gentle vs future versions on the initial syn */ 51 version = *ptr++ & MPTCP_VERSION_MASK; 52 if (opsize != TCPOLEN_MPTCP_MPC_SYN) { 53 if (version != MPTCP_SUPPORTED_VERSION) 54 break; 55 } else if (version < MPTCP_SUPPORTED_VERSION) { 56 break; 57 } 58 59 flags = *ptr++; 60 if (!mptcp_cap_flag_sha256(flags) || 61 (flags & MPTCP_CAP_EXTENSIBILITY)) 62 break; 63 64 /* RFC 6824, Section 3.1: 65 * "For the Checksum Required bit (labeled "A"), if either 66 * host requires the use of checksums, checksums MUST be used. 67 * In other words, the only way for checksums not to be used 68 * is if both hosts in their SYNs set A=0." 69 * 70 * Section 3.3.0: 71 * "If a checksum is not present when its use has been 72 * negotiated, the receiver MUST close the subflow with a RST as 73 * it is considered broken." 74 * 75 * We don't implement DSS checksum - fall back to TCP. 76 */ 77 if (flags & MPTCP_CAP_CHECKSUM_REQD) 78 break; 79 80 mp_opt->mp_capable = 1; 81 if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) { 82 mp_opt->sndr_key = get_unaligned_be64(ptr); 83 ptr += 8; 84 } 85 if (opsize >= TCPOLEN_MPTCP_MPC_ACK) { 86 mp_opt->rcvr_key = get_unaligned_be64(ptr); 87 ptr += 8; 88 } 89 if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) { 90 /* Section 3.1.: 91 * "the data parameters in a MP_CAPABLE are semantically 92 * equivalent to those in a DSS option and can be used 93 * interchangeably." 94 */ 95 mp_opt->dss = 1; 96 mp_opt->use_map = 1; 97 mp_opt->mpc_map = 1; 98 mp_opt->data_len = get_unaligned_be16(ptr); 99 ptr += 2; 100 } 101 pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d", 102 version, flags, opsize, mp_opt->sndr_key, 103 mp_opt->rcvr_key, mp_opt->data_len); 104 break; 105 106 case MPTCPOPT_MP_JOIN: 107 mp_opt->mp_join = 1; 108 if (opsize == TCPOLEN_MPTCP_MPJ_SYN) { 109 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 110 mp_opt->join_id = *ptr++; 111 mp_opt->token = get_unaligned_be32(ptr); 112 ptr += 4; 113 mp_opt->nonce = get_unaligned_be32(ptr); 114 ptr += 4; 115 pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u", 116 mp_opt->backup, mp_opt->join_id, 117 mp_opt->token, mp_opt->nonce); 118 } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) { 119 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 120 mp_opt->join_id = *ptr++; 121 mp_opt->thmac = get_unaligned_be64(ptr); 122 ptr += 8; 123 mp_opt->nonce = get_unaligned_be32(ptr); 124 ptr += 4; 125 pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u", 126 mp_opt->backup, mp_opt->join_id, 127 mp_opt->thmac, mp_opt->nonce); 128 } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) { 129 ptr += 2; 130 memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); 131 pr_debug("MP_JOIN hmac"); 132 } else { 133 pr_warn("MP_JOIN bad option size"); 134 mp_opt->mp_join = 0; 135 } 136 break; 137 138 case MPTCPOPT_DSS: 139 pr_debug("DSS"); 140 ptr++; 141 142 /* we must clear 'mpc_map' be able to detect MP_CAPABLE 143 * map vs DSS map in mptcp_incoming_options(), and reconstruct 144 * map info accordingly 145 */ 146 mp_opt->mpc_map = 0; 147 flags = (*ptr++) & MPTCP_DSS_FLAG_MASK; 148 mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0; 149 mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0; 150 mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0; 151 mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0; 152 mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK); 153 154 pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d", 155 mp_opt->data_fin, mp_opt->dsn64, 156 mp_opt->use_map, mp_opt->ack64, 157 mp_opt->use_ack); 158 159 expected_opsize = TCPOLEN_MPTCP_DSS_BASE; 160 161 if (mp_opt->use_ack) { 162 if (mp_opt->ack64) 163 expected_opsize += TCPOLEN_MPTCP_DSS_ACK64; 164 else 165 expected_opsize += TCPOLEN_MPTCP_DSS_ACK32; 166 } 167 168 if (mp_opt->use_map) { 169 if (mp_opt->dsn64) 170 expected_opsize += TCPOLEN_MPTCP_DSS_MAP64; 171 else 172 expected_opsize += TCPOLEN_MPTCP_DSS_MAP32; 173 } 174 175 /* RFC 6824, Section 3.3: 176 * If a checksum is present, but its use had 177 * not been negotiated in the MP_CAPABLE handshake, 178 * the checksum field MUST be ignored. 179 */ 180 if (opsize != expected_opsize && 181 opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) 182 break; 183 184 mp_opt->dss = 1; 185 186 if (mp_opt->use_ack) { 187 if (mp_opt->ack64) { 188 mp_opt->data_ack = get_unaligned_be64(ptr); 189 ptr += 8; 190 } else { 191 mp_opt->data_ack = get_unaligned_be32(ptr); 192 ptr += 4; 193 } 194 195 pr_debug("data_ack=%llu", mp_opt->data_ack); 196 } 197 198 if (mp_opt->use_map) { 199 if (mp_opt->dsn64) { 200 mp_opt->data_seq = get_unaligned_be64(ptr); 201 ptr += 8; 202 } else { 203 mp_opt->data_seq = get_unaligned_be32(ptr); 204 ptr += 4; 205 } 206 207 mp_opt->subflow_seq = get_unaligned_be32(ptr); 208 ptr += 4; 209 210 mp_opt->data_len = get_unaligned_be16(ptr); 211 ptr += 2; 212 213 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u", 214 mp_opt->data_seq, mp_opt->subflow_seq, 215 mp_opt->data_len); 216 } 217 218 break; 219 220 case MPTCPOPT_ADD_ADDR: 221 mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO; 222 if (!mp_opt->echo) { 223 if (opsize == TCPOLEN_MPTCP_ADD_ADDR || 224 opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT) 225 mp_opt->addr.family = AF_INET; 226 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 227 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 || 228 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT) 229 mp_opt->addr.family = AF_INET6; 230 #endif 231 else 232 break; 233 } else { 234 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE || 235 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) 236 mp_opt->addr.family = AF_INET; 237 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 238 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE || 239 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) 240 mp_opt->addr.family = AF_INET6; 241 #endif 242 else 243 break; 244 } 245 246 mp_opt->add_addr = 1; 247 mp_opt->addr.id = *ptr++; 248 if (mp_opt->addr.family == AF_INET) { 249 memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4); 250 ptr += 4; 251 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT || 252 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) { 253 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 254 ptr += 2; 255 } 256 } 257 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 258 else { 259 memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16); 260 ptr += 16; 261 if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT || 262 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) { 263 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 264 ptr += 2; 265 } 266 } 267 #endif 268 if (!mp_opt->echo) { 269 mp_opt->ahmac = get_unaligned_be64(ptr); 270 ptr += 8; 271 } 272 pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", 273 (mp_opt->addr.family == AF_INET6) ? "6" : "", 274 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port)); 275 break; 276 277 case MPTCPOPT_RM_ADDR: 278 if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 || 279 opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX) 280 break; 281 282 ptr++; 283 284 mp_opt->rm_addr = 1; 285 mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; 286 for (i = 0; i < mp_opt->rm_list.nr; i++) 287 mp_opt->rm_list.ids[i] = *ptr++; 288 pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr); 289 break; 290 291 case MPTCPOPT_MP_PRIO: 292 if (opsize != TCPOLEN_MPTCP_PRIO) 293 break; 294 295 mp_opt->mp_prio = 1; 296 mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; 297 pr_debug("MP_PRIO: prio=%d", mp_opt->backup); 298 break; 299 300 case MPTCPOPT_MP_FASTCLOSE: 301 if (opsize != TCPOLEN_MPTCP_FASTCLOSE) 302 break; 303 304 ptr += 2; 305 mp_opt->rcvr_key = get_unaligned_be64(ptr); 306 ptr += 8; 307 mp_opt->fastclose = 1; 308 break; 309 310 case MPTCPOPT_RST: 311 if (opsize != TCPOLEN_MPTCP_RST) 312 break; 313 314 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) 315 break; 316 mp_opt->reset = 1; 317 flags = *ptr++; 318 mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; 319 mp_opt->reset_reason = *ptr; 320 break; 321 322 default: 323 break; 324 } 325 } 326 327 void mptcp_get_options(const struct sk_buff *skb, 328 struct mptcp_options_received *mp_opt) 329 { 330 const struct tcphdr *th = tcp_hdr(skb); 331 const unsigned char *ptr; 332 int length; 333 334 /* initialize option status */ 335 mp_opt->mp_capable = 0; 336 mp_opt->mp_join = 0; 337 mp_opt->add_addr = 0; 338 mp_opt->ahmac = 0; 339 mp_opt->fastclose = 0; 340 mp_opt->addr.port = 0; 341 mp_opt->rm_addr = 0; 342 mp_opt->dss = 0; 343 mp_opt->mp_prio = 0; 344 mp_opt->reset = 0; 345 346 length = (th->doff * 4) - sizeof(struct tcphdr); 347 ptr = (const unsigned char *)(th + 1); 348 349 while (length > 0) { 350 int opcode = *ptr++; 351 int opsize; 352 353 switch (opcode) { 354 case TCPOPT_EOL: 355 return; 356 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 357 length--; 358 continue; 359 default: 360 opsize = *ptr++; 361 if (opsize < 2) /* "silly options" */ 362 return; 363 if (opsize > length) 364 return; /* don't parse partial options */ 365 if (opcode == TCPOPT_MPTCP) 366 mptcp_parse_option(skb, ptr, opsize, mp_opt); 367 ptr += opsize - 2; 368 length -= opsize; 369 } 370 } 371 } 372 373 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, 374 unsigned int *size, struct mptcp_out_options *opts) 375 { 376 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 377 378 /* we will use snd_isn to detect first pkt [re]transmission 379 * in mptcp_established_options_mp() 380 */ 381 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; 382 if (subflow->request_mptcp) { 383 opts->suboptions = OPTION_MPTCP_MPC_SYN; 384 *size = TCPOLEN_MPTCP_MPC_SYN; 385 return true; 386 } else if (subflow->request_join) { 387 pr_debug("remote_token=%u, nonce=%u", subflow->remote_token, 388 subflow->local_nonce); 389 opts->suboptions = OPTION_MPTCP_MPJ_SYN; 390 opts->join_id = subflow->local_id; 391 opts->token = subflow->remote_token; 392 opts->nonce = subflow->local_nonce; 393 opts->backup = subflow->request_bkup; 394 *size = TCPOLEN_MPTCP_MPJ_SYN; 395 return true; 396 } 397 return false; 398 } 399 400 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 401 * TCP can't schedule delack timer before the subflow is fully established. 402 * MPTCP uses the delack timer to do 3rd ack retransmissions 403 */ 404 static void schedule_3rdack_retransmission(struct sock *sk) 405 { 406 struct inet_connection_sock *icsk = inet_csk(sk); 407 struct tcp_sock *tp = tcp_sk(sk); 408 unsigned long timeout; 409 410 /* reschedule with a timeout above RTT, as we must look only for drop */ 411 if (tp->srtt_us) 412 timeout = tp->srtt_us << 1; 413 else 414 timeout = TCP_TIMEOUT_INIT; 415 416 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 417 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 418 icsk->icsk_ack.timeout = timeout; 419 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 420 } 421 422 static void clear_3rdack_retransmission(struct sock *sk) 423 { 424 struct inet_connection_sock *icsk = inet_csk(sk); 425 426 sk_stop_timer(sk, &icsk->icsk_delack_timer); 427 icsk->icsk_ack.timeout = 0; 428 icsk->icsk_ack.ato = 0; 429 icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER); 430 } 431 432 static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, 433 bool snd_data_fin_enable, 434 unsigned int *size, 435 unsigned int remaining, 436 struct mptcp_out_options *opts) 437 { 438 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 439 struct mptcp_ext *mpext; 440 unsigned int data_len; 441 442 /* When skb is not available, we better over-estimate the emitted 443 * options len. A full DSS option (28 bytes) is longer than 444 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so 445 * tell the caller to defer the estimate to 446 * mptcp_established_options_dss(), which will reserve enough space. 447 */ 448 if (!skb) 449 return false; 450 451 /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */ 452 if (subflow->fully_established || snd_data_fin_enable || 453 subflow->snd_isn != TCP_SKB_CB(skb)->seq || 454 sk->sk_state != TCP_ESTABLISHED) 455 return false; 456 457 if (subflow->mp_capable) { 458 mpext = mptcp_get_ext(skb); 459 data_len = mpext ? mpext->data_len : 0; 460 461 /* we will check ext_copy.data_len in mptcp_write_options() to 462 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and 463 * TCPOLEN_MPTCP_MPC_ACK 464 */ 465 opts->ext_copy.data_len = data_len; 466 opts->suboptions = OPTION_MPTCP_MPC_ACK; 467 opts->sndr_key = subflow->local_key; 468 opts->rcvr_key = subflow->remote_key; 469 470 /* Section 3.1. 471 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK 472 * packets that start the first subflow of an MPTCP connection, 473 * as well as the first packet that carries data 474 */ 475 if (data_len > 0) 476 *size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4); 477 else 478 *size = TCPOLEN_MPTCP_MPC_ACK; 479 480 pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d", 481 subflow, subflow->local_key, subflow->remote_key, 482 data_len); 483 484 return true; 485 } else if (subflow->mp_join) { 486 opts->suboptions = OPTION_MPTCP_MPJ_ACK; 487 memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN); 488 *size = TCPOLEN_MPTCP_MPJ_ACK; 489 pr_debug("subflow=%p", subflow); 490 491 schedule_3rdack_retransmission(sk); 492 return true; 493 } 494 return false; 495 } 496 497 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, 498 struct sk_buff *skb, struct mptcp_ext *ext) 499 { 500 /* The write_seq value has already been incremented, so the actual 501 * sequence number for the DATA_FIN is one less. 502 */ 503 u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1; 504 505 if (!ext->use_map || !skb->len) { 506 /* RFC6824 requires a DSS mapping with specific values 507 * if DATA_FIN is set but no data payload is mapped 508 */ 509 ext->data_fin = 1; 510 ext->use_map = 1; 511 ext->dsn64 = 1; 512 ext->data_seq = data_fin_tx_seq; 513 ext->subflow_seq = 0; 514 ext->data_len = 1; 515 } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) { 516 /* If there's an existing DSS mapping and it is the 517 * final mapping, DATA_FIN consumes 1 additional byte of 518 * mapping space. 519 */ 520 ext->data_fin = 1; 521 ext->data_len++; 522 } 523 } 524 525 static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, 526 bool snd_data_fin_enable, 527 unsigned int *size, 528 unsigned int remaining, 529 struct mptcp_out_options *opts) 530 { 531 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 532 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 533 unsigned int dss_size = 0; 534 struct mptcp_ext *mpext; 535 unsigned int ack_size; 536 bool ret = false; 537 u64 ack_seq; 538 539 mpext = skb ? mptcp_get_ext(skb) : NULL; 540 541 if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) { 542 unsigned int map_size; 543 544 map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64; 545 546 remaining -= map_size; 547 dss_size = map_size; 548 if (mpext) 549 opts->ext_copy = *mpext; 550 551 if (skb && snd_data_fin_enable) 552 mptcp_write_data_fin(subflow, skb, &opts->ext_copy); 553 ret = true; 554 } 555 556 /* passive sockets msk will set the 'can_ack' after accept(), even 557 * if the first subflow may have the already the remote key handy 558 */ 559 opts->ext_copy.use_ack = 0; 560 if (!READ_ONCE(msk->can_ack)) { 561 *size = ALIGN(dss_size, 4); 562 return ret; 563 } 564 565 ack_seq = READ_ONCE(msk->ack_seq); 566 if (READ_ONCE(msk->use_64bit_ack)) { 567 ack_size = TCPOLEN_MPTCP_DSS_ACK64; 568 opts->ext_copy.data_ack = ack_seq; 569 opts->ext_copy.ack64 = 1; 570 } else { 571 ack_size = TCPOLEN_MPTCP_DSS_ACK32; 572 opts->ext_copy.data_ack32 = (uint32_t)ack_seq; 573 opts->ext_copy.ack64 = 0; 574 } 575 opts->ext_copy.use_ack = 1; 576 WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk)); 577 578 /* Add kind/length/subtype/flag overhead if mapping is not populated */ 579 if (dss_size == 0) 580 ack_size += TCPOLEN_MPTCP_DSS_BASE; 581 582 dss_size += ack_size; 583 584 *size = ALIGN(dss_size, 4); 585 return true; 586 } 587 588 static u64 add_addr_generate_hmac(u64 key1, u64 key2, 589 struct mptcp_addr_info *addr) 590 { 591 u16 port = ntohs(addr->port); 592 u8 hmac[SHA256_DIGEST_SIZE]; 593 u8 msg[19]; 594 int i = 0; 595 596 msg[i++] = addr->id; 597 if (addr->family == AF_INET) { 598 memcpy(&msg[i], &addr->addr.s_addr, 4); 599 i += 4; 600 } 601 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 602 else if (addr->family == AF_INET6) { 603 memcpy(&msg[i], &addr->addr6.s6_addr, 16); 604 i += 16; 605 } 606 #endif 607 msg[i++] = port >> 8; 608 msg[i++] = port & 0xFF; 609 610 mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac); 611 612 return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 613 } 614 615 static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb, 616 unsigned int *size, 617 unsigned int remaining, 618 struct mptcp_out_options *opts) 619 { 620 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 621 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 622 bool drop_other_suboptions = false; 623 unsigned int opt_size = *size; 624 bool echo; 625 bool port; 626 int len; 627 628 if ((mptcp_pm_should_add_signal_ipv6(msk) || 629 mptcp_pm_should_add_signal_port(msk) || 630 mptcp_pm_should_add_signal_echo(msk)) && 631 skb && skb_is_tcp_pure_ack(skb)) { 632 pr_debug("drop other suboptions"); 633 opts->suboptions = 0; 634 opts->ext_copy.use_ack = 0; 635 opts->ext_copy.use_map = 0; 636 remaining += opt_size; 637 drop_other_suboptions = true; 638 } 639 640 if (!mptcp_pm_should_add_signal(msk) || 641 !(mptcp_pm_add_addr_signal(msk, remaining, &opts->addr, &echo, &port))) 642 return false; 643 644 len = mptcp_add_addr_len(opts->addr.family, echo, port); 645 if (remaining < len) 646 return false; 647 648 *size = len; 649 if (drop_other_suboptions) 650 *size -= opt_size; 651 opts->suboptions |= OPTION_MPTCP_ADD_ADDR; 652 if (!echo) { 653 opts->ahmac = add_addr_generate_hmac(msk->local_key, 654 msk->remote_key, 655 &opts->addr); 656 } 657 pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", 658 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); 659 660 return true; 661 } 662 663 static bool mptcp_established_options_rm_addr(struct sock *sk, 664 unsigned int *size, 665 unsigned int remaining, 666 struct mptcp_out_options *opts) 667 { 668 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 669 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 670 struct mptcp_rm_list rm_list; 671 int i, len; 672 673 if (!mptcp_pm_should_rm_signal(msk) || 674 !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list))) 675 return false; 676 677 len = mptcp_rm_addr_len(&rm_list); 678 if (len < 0) 679 return false; 680 if (remaining < len) 681 return false; 682 683 *size = len; 684 opts->suboptions |= OPTION_MPTCP_RM_ADDR; 685 opts->rm_list = rm_list; 686 687 for (i = 0; i < opts->rm_list.nr; i++) 688 pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); 689 690 return true; 691 } 692 693 static bool mptcp_established_options_mp_prio(struct sock *sk, 694 unsigned int *size, 695 unsigned int remaining, 696 struct mptcp_out_options *opts) 697 { 698 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 699 700 if (!subflow->send_mp_prio) 701 return false; 702 703 /* account for the trailing 'nop' option */ 704 if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN) 705 return false; 706 707 *size = TCPOLEN_MPTCP_PRIO_ALIGN; 708 opts->suboptions |= OPTION_MPTCP_PRIO; 709 opts->backup = subflow->request_bkup; 710 711 pr_debug("prio=%d", opts->backup); 712 713 return true; 714 } 715 716 static noinline void mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb, 717 unsigned int *size, 718 unsigned int remaining, 719 struct mptcp_out_options *opts) 720 { 721 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 722 723 if (remaining < TCPOLEN_MPTCP_RST) 724 return; 725 726 *size = TCPOLEN_MPTCP_RST; 727 opts->suboptions |= OPTION_MPTCP_RST; 728 opts->reset_transient = subflow->reset_transient; 729 opts->reset_reason = subflow->reset_reason; 730 } 731 732 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, 733 unsigned int *size, unsigned int remaining, 734 struct mptcp_out_options *opts) 735 { 736 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 737 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 738 unsigned int opt_size = 0; 739 bool snd_data_fin; 740 bool ret = false; 741 742 opts->suboptions = 0; 743 744 if (unlikely(__mptcp_check_fallback(msk))) 745 return false; 746 747 if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) { 748 mptcp_established_options_rst(sk, skb, size, remaining, opts); 749 return true; 750 } 751 752 snd_data_fin = mptcp_data_fin_enabled(msk); 753 if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts)) 754 ret = true; 755 else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) 756 ret = true; 757 758 /* we reserved enough space for the above options, and exceeding the 759 * TCP option space would be fatal 760 */ 761 if (WARN_ON_ONCE(opt_size > remaining)) 762 return false; 763 764 *size += opt_size; 765 remaining -= opt_size; 766 if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) { 767 *size += opt_size; 768 remaining -= opt_size; 769 ret = true; 770 } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) { 771 *size += opt_size; 772 remaining -= opt_size; 773 ret = true; 774 } 775 776 if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) { 777 *size += opt_size; 778 remaining -= opt_size; 779 ret = true; 780 } 781 782 return ret; 783 } 784 785 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, 786 struct mptcp_out_options *opts) 787 { 788 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 789 790 if (subflow_req->mp_capable) { 791 opts->suboptions = OPTION_MPTCP_MPC_SYNACK; 792 opts->sndr_key = subflow_req->local_key; 793 *size = TCPOLEN_MPTCP_MPC_SYNACK; 794 pr_debug("subflow_req=%p, local_key=%llu", 795 subflow_req, subflow_req->local_key); 796 return true; 797 } else if (subflow_req->mp_join) { 798 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK; 799 opts->backup = subflow_req->backup; 800 opts->join_id = subflow_req->local_id; 801 opts->thmac = subflow_req->thmac; 802 opts->nonce = subflow_req->local_nonce; 803 pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u", 804 subflow_req, opts->backup, opts->join_id, 805 opts->thmac, opts->nonce); 806 *size = TCPOLEN_MPTCP_MPJ_SYNACK; 807 return true; 808 } 809 return false; 810 } 811 812 static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, 813 struct mptcp_subflow_context *subflow, 814 struct sk_buff *skb, 815 struct mptcp_options_received *mp_opt) 816 { 817 /* here we can process OoO, in-window pkts, only in-sequence 4th ack 818 * will make the subflow fully established 819 */ 820 if (likely(subflow->fully_established)) { 821 /* on passive sockets, check for 3rd ack retransmission 822 * note that msk is always set by subflow_syn_recv_sock() 823 * for mp_join subflows 824 */ 825 if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 && 826 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && 827 subflow->mp_join && mp_opt->mp_join && 828 READ_ONCE(msk->pm.server_side)) 829 tcp_send_ack(ssk); 830 goto fully_established; 831 } 832 833 /* we must process OoO packets before the first subflow is fully 834 * established. OoO packets are instead a protocol violation 835 * for MP_JOIN subflows as the peer must not send any data 836 * before receiving the forth ack - cfr. RFC 8684 section 3.2. 837 */ 838 if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) { 839 if (subflow->mp_join) 840 goto reset; 841 return subflow->mp_capable; 842 } 843 844 if (mp_opt->dss && mp_opt->use_ack) { 845 /* subflows are fully established as soon as we get any 846 * additional ack. 847 */ 848 subflow->fully_established = 1; 849 WRITE_ONCE(msk->fully_established, true); 850 goto fully_established; 851 } 852 853 if (mp_opt->add_addr) { 854 WRITE_ONCE(msk->fully_established, true); 855 return true; 856 } 857 858 /* If the first established packet does not contain MP_CAPABLE + data 859 * then fallback to TCP. Fallback scenarios requires a reset for 860 * MP_JOIN subflows. 861 */ 862 if (!mp_opt->mp_capable) { 863 if (subflow->mp_join) 864 goto reset; 865 subflow->mp_capable = 0; 866 pr_fallback(msk); 867 __mptcp_do_fallback(msk); 868 return false; 869 } 870 871 if (unlikely(!READ_ONCE(msk->pm.server_side))) 872 pr_warn_once("bogus mpc option on established client sk"); 873 mptcp_subflow_fully_established(subflow, mp_opt); 874 875 fully_established: 876 /* if the subflow is not already linked into the conn_list, we can't 877 * notify the PM: this subflow is still on the listener queue 878 * and the PM possibly acquiring the subflow lock could race with 879 * the listener close 880 */ 881 if (likely(subflow->pm_notified) || list_empty(&subflow->node)) 882 return true; 883 884 subflow->pm_notified = 1; 885 if (subflow->mp_join) { 886 clear_3rdack_retransmission(ssk); 887 mptcp_pm_subflow_established(msk); 888 } else { 889 mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC); 890 } 891 return true; 892 893 reset: 894 mptcp_subflow_reset(ssk); 895 return false; 896 } 897 898 static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit) 899 { 900 u32 old_ack32, cur_ack32; 901 902 if (use_64bit) 903 return cur_ack; 904 905 old_ack32 = (u32)old_ack; 906 cur_ack32 = (u32)cur_ack; 907 cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32; 908 if (unlikely(before(cur_ack32, old_ack32))) 909 return cur_ack + (1LL << 32); 910 return cur_ack; 911 } 912 913 static void ack_update_msk(struct mptcp_sock *msk, 914 struct sock *ssk, 915 struct mptcp_options_received *mp_opt) 916 { 917 u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt); 918 struct sock *sk = (struct sock *)msk; 919 u64 old_snd_una; 920 921 mptcp_data_lock(sk); 922 923 /* avoid ack expansion on update conflict, to reduce the risk of 924 * wrongly expanding to a future ack sequence number, which is way 925 * more dangerous than missing an ack 926 */ 927 old_snd_una = msk->snd_una; 928 new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64); 929 930 /* ACK for data not even sent yet? Ignore. */ 931 if (after64(new_snd_una, snd_nxt)) 932 new_snd_una = old_snd_una; 933 934 new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd; 935 936 if (after64(new_wnd_end, msk->wnd_end)) 937 msk->wnd_end = new_wnd_end; 938 939 /* this assumes mptcp_incoming_options() is invoked after tcp_ack() */ 940 if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt))) 941 __mptcp_check_push(sk, ssk); 942 943 if (after64(new_snd_una, old_snd_una)) { 944 msk->snd_una = new_snd_una; 945 __mptcp_data_acked(sk); 946 } 947 mptcp_data_unlock(sk); 948 949 trace_ack_update_msk(mp_opt->data_ack, 950 old_snd_una, new_snd_una, 951 new_wnd_end, msk->wnd_end); 952 } 953 954 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit) 955 { 956 /* Skip if DATA_FIN was already received. 957 * If updating simultaneously with the recvmsg loop, values 958 * should match. If they mismatch, the peer is misbehaving and 959 * we will prefer the most recent information. 960 */ 961 if (READ_ONCE(msk->rcv_data_fin)) 962 return false; 963 964 WRITE_ONCE(msk->rcv_data_fin_seq, 965 expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit)); 966 WRITE_ONCE(msk->rcv_data_fin, 1); 967 968 return true; 969 } 970 971 static bool add_addr_hmac_valid(struct mptcp_sock *msk, 972 struct mptcp_options_received *mp_opt) 973 { 974 u64 hmac = 0; 975 976 if (mp_opt->echo) 977 return true; 978 979 hmac = add_addr_generate_hmac(msk->remote_key, 980 msk->local_key, 981 &mp_opt->addr); 982 983 pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n", 984 msk, (unsigned long long)hmac, 985 (unsigned long long)mp_opt->ahmac); 986 987 return hmac == mp_opt->ahmac; 988 } 989 990 void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) 991 { 992 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 993 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 994 struct mptcp_options_received mp_opt; 995 struct mptcp_ext *mpext; 996 997 if (__mptcp_check_fallback(msk)) { 998 /* Keep it simple and unconditionally trigger send data cleanup and 999 * pending queue spooling. We will need to acquire the data lock 1000 * for more accurate checks, and once the lock is acquired, such 1001 * helpers are cheap. 1002 */ 1003 mptcp_data_lock(subflow->conn); 1004 if (sk_stream_memory_free(sk)) 1005 __mptcp_check_push(subflow->conn, sk); 1006 __mptcp_data_acked(subflow->conn); 1007 mptcp_data_unlock(subflow->conn); 1008 return; 1009 } 1010 1011 mptcp_get_options(skb, &mp_opt); 1012 if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) 1013 return; 1014 1015 if (mp_opt.fastclose && 1016 msk->local_key == mp_opt.rcvr_key) { 1017 WRITE_ONCE(msk->rcv_fastclose, true); 1018 mptcp_schedule_work((struct sock *)msk); 1019 } 1020 1021 if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) { 1022 if (!mp_opt.echo) { 1023 mptcp_pm_add_addr_received(msk, &mp_opt.addr); 1024 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR); 1025 } else { 1026 mptcp_pm_add_addr_echoed(msk, &mp_opt.addr); 1027 mptcp_pm_del_add_timer(msk, &mp_opt.addr); 1028 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD); 1029 } 1030 1031 if (mp_opt.addr.port) 1032 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD); 1033 1034 mp_opt.add_addr = 0; 1035 } 1036 1037 if (mp_opt.rm_addr) { 1038 mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list); 1039 mp_opt.rm_addr = 0; 1040 } 1041 1042 if (mp_opt.mp_prio) { 1043 mptcp_pm_mp_prio_received(sk, mp_opt.backup); 1044 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX); 1045 mp_opt.mp_prio = 0; 1046 } 1047 1048 if (mp_opt.reset) { 1049 subflow->reset_seen = 1; 1050 subflow->reset_reason = mp_opt.reset_reason; 1051 subflow->reset_transient = mp_opt.reset_transient; 1052 } 1053 1054 if (!mp_opt.dss) 1055 return; 1056 1057 /* we can't wait for recvmsg() to update the ack_seq, otherwise 1058 * monodirectional flows will stuck 1059 */ 1060 if (mp_opt.use_ack) 1061 ack_update_msk(msk, sk, &mp_opt); 1062 1063 /* Zero-data-length packets are dropped by the caller and not 1064 * propagated to the MPTCP layer, so the skb extension does not 1065 * need to be allocated or populated. DATA_FIN information, if 1066 * present, needs to be updated here before the skb is freed. 1067 */ 1068 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 1069 if (mp_opt.data_fin && mp_opt.data_len == 1 && 1070 mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) && 1071 schedule_work(&msk->work)) 1072 sock_hold(subflow->conn); 1073 1074 return; 1075 } 1076 1077 mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 1078 if (!mpext) 1079 return; 1080 1081 memset(mpext, 0, sizeof(*mpext)); 1082 1083 if (mp_opt.use_map) { 1084 if (mp_opt.mpc_map) { 1085 /* this is an MP_CAPABLE carrying MPTCP data 1086 * we know this map the first chunk of data 1087 */ 1088 mptcp_crypto_key_sha(subflow->remote_key, NULL, 1089 &mpext->data_seq); 1090 mpext->data_seq++; 1091 mpext->subflow_seq = 1; 1092 mpext->dsn64 = 1; 1093 mpext->mpc_map = 1; 1094 mpext->data_fin = 0; 1095 } else { 1096 mpext->data_seq = mp_opt.data_seq; 1097 mpext->subflow_seq = mp_opt.subflow_seq; 1098 mpext->dsn64 = mp_opt.dsn64; 1099 mpext->data_fin = mp_opt.data_fin; 1100 } 1101 mpext->data_len = mp_opt.data_len; 1102 mpext->use_map = 1; 1103 } 1104 } 1105 1106 static void mptcp_set_rwin(const struct tcp_sock *tp) 1107 { 1108 const struct sock *ssk = (const struct sock *)tp; 1109 const struct mptcp_subflow_context *subflow; 1110 struct mptcp_sock *msk; 1111 u64 ack_seq; 1112 1113 subflow = mptcp_subflow_ctx(ssk); 1114 msk = mptcp_sk(subflow->conn); 1115 1116 ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd; 1117 1118 if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent))) 1119 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 1120 } 1121 1122 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, 1123 struct mptcp_out_options *opts) 1124 { 1125 if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | 1126 OPTION_MPTCP_MPC_ACK) & opts->suboptions) { 1127 u8 len; 1128 1129 if (OPTION_MPTCP_MPC_SYN & opts->suboptions) 1130 len = TCPOLEN_MPTCP_MPC_SYN; 1131 else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) 1132 len = TCPOLEN_MPTCP_MPC_SYNACK; 1133 else if (opts->ext_copy.data_len) 1134 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 1135 else 1136 len = TCPOLEN_MPTCP_MPC_ACK; 1137 1138 *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len, 1139 MPTCP_SUPPORTED_VERSION, 1140 MPTCP_CAP_HMAC_SHA256); 1141 1142 if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) & 1143 opts->suboptions)) 1144 goto mp_capable_done; 1145 1146 put_unaligned_be64(opts->sndr_key, ptr); 1147 ptr += 2; 1148 if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions)) 1149 goto mp_capable_done; 1150 1151 put_unaligned_be64(opts->rcvr_key, ptr); 1152 ptr += 2; 1153 if (!opts->ext_copy.data_len) 1154 goto mp_capable_done; 1155 1156 put_unaligned_be32(opts->ext_copy.data_len << 16 | 1157 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1158 ptr += 1; 1159 } 1160 1161 mp_capable_done: 1162 if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) { 1163 u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE; 1164 u8 echo = MPTCP_ADDR_ECHO; 1165 1166 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1167 if (opts->addr.family == AF_INET6) 1168 len = TCPOLEN_MPTCP_ADD_ADDR6_BASE; 1169 #endif 1170 1171 if (opts->addr.port) 1172 len += TCPOLEN_MPTCP_PORT_LEN; 1173 1174 if (opts->ahmac) { 1175 len += sizeof(opts->ahmac); 1176 echo = 0; 1177 } 1178 1179 *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, 1180 len, echo, opts->addr.id); 1181 if (opts->addr.family == AF_INET) { 1182 memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4); 1183 ptr += 1; 1184 } 1185 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1186 else if (opts->addr.family == AF_INET6) { 1187 memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16); 1188 ptr += 4; 1189 } 1190 #endif 1191 1192 if (!opts->addr.port) { 1193 if (opts->ahmac) { 1194 put_unaligned_be64(opts->ahmac, ptr); 1195 ptr += 2; 1196 } 1197 } else { 1198 u16 port = ntohs(opts->addr.port); 1199 1200 if (opts->ahmac) { 1201 u8 *bptr = (u8 *)ptr; 1202 1203 put_unaligned_be16(port, bptr); 1204 bptr += 2; 1205 put_unaligned_be64(opts->ahmac, bptr); 1206 bptr += 8; 1207 put_unaligned_be16(TCPOPT_NOP << 8 | 1208 TCPOPT_NOP, bptr); 1209 1210 ptr += 3; 1211 } else { 1212 put_unaligned_be32(port << 16 | 1213 TCPOPT_NOP << 8 | 1214 TCPOPT_NOP, ptr); 1215 ptr += 1; 1216 } 1217 } 1218 } 1219 1220 if (OPTION_MPTCP_RM_ADDR & opts->suboptions) { 1221 u8 i = 1; 1222 1223 *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR, 1224 TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr, 1225 0, opts->rm_list.ids[0]); 1226 1227 while (i < opts->rm_list.nr) { 1228 u8 id1, id2, id3, id4; 1229 1230 id1 = opts->rm_list.ids[i]; 1231 id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP; 1232 id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP; 1233 id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP; 1234 put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr); 1235 ptr += 1; 1236 i += 4; 1237 } 1238 } 1239 1240 if (OPTION_MPTCP_PRIO & opts->suboptions) { 1241 const struct sock *ssk = (const struct sock *)tp; 1242 struct mptcp_subflow_context *subflow; 1243 1244 subflow = mptcp_subflow_ctx(ssk); 1245 subflow->send_mp_prio = 0; 1246 1247 *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO, 1248 TCPOLEN_MPTCP_PRIO, 1249 opts->backup, TCPOPT_NOP); 1250 } 1251 1252 if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) { 1253 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1254 TCPOLEN_MPTCP_MPJ_SYN, 1255 opts->backup, opts->join_id); 1256 put_unaligned_be32(opts->token, ptr); 1257 ptr += 1; 1258 put_unaligned_be32(opts->nonce, ptr); 1259 ptr += 1; 1260 } 1261 1262 if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) { 1263 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1264 TCPOLEN_MPTCP_MPJ_SYNACK, 1265 opts->backup, opts->join_id); 1266 put_unaligned_be64(opts->thmac, ptr); 1267 ptr += 2; 1268 put_unaligned_be32(opts->nonce, ptr); 1269 ptr += 1; 1270 } 1271 1272 if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) { 1273 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1274 TCPOLEN_MPTCP_MPJ_ACK, 0, 0); 1275 memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN); 1276 ptr += 5; 1277 } 1278 1279 if (OPTION_MPTCP_RST & opts->suboptions) 1280 *ptr++ = mptcp_option(MPTCPOPT_RST, 1281 TCPOLEN_MPTCP_RST, 1282 opts->reset_transient, 1283 opts->reset_reason); 1284 1285 if (opts->ext_copy.use_ack || opts->ext_copy.use_map) { 1286 struct mptcp_ext *mpext = &opts->ext_copy; 1287 u8 len = TCPOLEN_MPTCP_DSS_BASE; 1288 u8 flags = 0; 1289 1290 if (mpext->use_ack) { 1291 flags = MPTCP_DSS_HAS_ACK; 1292 if (mpext->ack64) { 1293 len += TCPOLEN_MPTCP_DSS_ACK64; 1294 flags |= MPTCP_DSS_ACK64; 1295 } else { 1296 len += TCPOLEN_MPTCP_DSS_ACK32; 1297 } 1298 } 1299 1300 if (mpext->use_map) { 1301 len += TCPOLEN_MPTCP_DSS_MAP64; 1302 1303 /* Use only 64-bit mapping flags for now, add 1304 * support for optional 32-bit mappings later. 1305 */ 1306 flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64; 1307 if (mpext->data_fin) 1308 flags |= MPTCP_DSS_DATA_FIN; 1309 } 1310 1311 *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags); 1312 1313 if (mpext->use_ack) { 1314 if (mpext->ack64) { 1315 put_unaligned_be64(mpext->data_ack, ptr); 1316 ptr += 2; 1317 } else { 1318 put_unaligned_be32(mpext->data_ack32, ptr); 1319 ptr += 1; 1320 } 1321 } 1322 1323 if (mpext->use_map) { 1324 put_unaligned_be64(mpext->data_seq, ptr); 1325 ptr += 2; 1326 put_unaligned_be32(mpext->subflow_seq, ptr); 1327 ptr += 1; 1328 put_unaligned_be32(mpext->data_len << 16 | 1329 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1330 } 1331 } 1332 1333 if (tp) 1334 mptcp_set_rwin(tp); 1335 } 1336 1337 __be32 mptcp_get_reset_option(const struct sk_buff *skb) 1338 { 1339 const struct mptcp_ext *ext = mptcp_get_ext(skb); 1340 u8 flags, reason; 1341 1342 if (ext) { 1343 flags = ext->reset_transient; 1344 reason = ext->reset_reason; 1345 1346 return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST, 1347 flags, reason); 1348 } 1349 1350 return htonl(0u); 1351 } 1352 EXPORT_SYMBOL_GPL(mptcp_get_reset_option); 1353