// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		MPTCP_INC_STATS(sock_net((struct sock *)msk),
				echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
				MPTCP_MIB_RMADDRTXDROP, rm_list->nr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			spin_lock_bh(&pm->lock);
			pm->subflows++;
			spin_unlock_bh(&pm->lock);
			return true;
		}
		return false;
	}

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work item
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
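
/* Typical call pattern, as used by the event handlers below (sketch only):
 *
 *	spin_lock_bh(&msk->pm.lock);
 *	if (mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED))
 *		; // event bit newly set, msk worker has been scheduled
 *	spin_unlock_bh(&msk->pm.lock);
 */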

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established() -
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (mptcp_pm_is_userspace(msk)) {
		if (update_subflows) {
			spin_lock_bh(&pm->lock);
			pm->subflows--;
			spin_unlock_bh(&pm->lock);
		}
		return;
	}

	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
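
/* Dispatch for an incoming ADD_ADDR: an active userspace PM echoes the
 * address straight away; the in-kernel PM either echoes it directly (when it
 * is not accepting new addresses) or stores it in pm->remote and defers to
 * the worker via MPTCP_PM_ADD_ADDR_RECEIVED. All remaining cases are counted
 * as MPTCP_MIB_ADDADDRDROP.
 */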
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup)
		subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}
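
/* Note on the MP_FAIL handling above: when no MP_FAIL response is pending
 * (fail_tout not set), the handler answers with its own MP_FAIL plus an
 * infinite mapping, i.e. it falls back to plain TCP semantics on this
 * subflow, as RFC 8684 allows while infinite fallback is still permitted
 * on the connection.
 */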

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all other options for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;
	struct mptcp_addr_info msk_local;

	if (WARN_ON_ONCE(!msk))
		return -1;

	/* The 0 ID mapping is defined by the first subflow, copied into the msk
	 * addr
	 */
	mptcp_local_address((struct sock_common *)msk, &msk_local);
	mptcp_local_address((struct sock_common *)skc, &skc_local);
	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
		return 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
	return mptcp_pm_nl_get_local_id(msk, &skc_local);
}

bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;

	mptcp_local_address((struct sock_common *)skc, &skc_local);

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_is_backup(msk, &skc_local);

	return mptcp_pm_nl_is_backup(msk, &skc_local);
}

int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
					 u8 *flags, int *ifindex)
{
	*flags = 0;
	*ifindex = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
	return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}
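
/* The presence of a netlink token attribute selects the userspace PM;
 * without it, the request is handled by the in-kernel netlink PM, which
 * takes no remote address and only updates the local endpoint flags.
 */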
int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
		       struct mptcp_pm_addr_entry *loc,
		       struct mptcp_pm_addr_entry *rem, u8 bkup)
{
	if (token)
		return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
	return mptcp_pm_nl_set_flags(net, loc, bkup);
}

void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

/* if sk is an IPv4 or ipv6_only socket, allow only same-family local and
 * remote addresses, otherwise allow any matching local/remote pair
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
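
/* Per-socket PM state is (re)initialized via mptcp_pm_data_init() above,
 * while mptcp_pm_init() runs once at boot, from the MPTCP protocol
 * initialization code, to set up the in-kernel netlink path manager.
 */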