1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2019, Intel Corporation. 5 */ 6 #define pr_fmt(fmt) "MPTCP: " fmt 7 8 #include <linux/kernel.h> 9 #include <net/tcp.h> 10 #include <net/mptcp.h> 11 #include "protocol.h" 12 13 #include "mib.h" 14 15 /* path manager command handlers */ 16 17 int mptcp_pm_announce_addr(struct mptcp_sock *msk, 18 const struct mptcp_addr_info *addr, 19 bool echo) 20 { 21 u8 add_addr = READ_ONCE(msk->pm.addr_signal); 22 23 pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo); 24 25 lockdep_assert_held(&msk->pm.lock); 26 27 if (add_addr & 28 (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) { 29 pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo); 30 return -EINVAL; 31 } 32 33 if (echo) { 34 msk->pm.remote = *addr; 35 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO); 36 } else { 37 msk->pm.local = *addr; 38 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL); 39 } 40 WRITE_ONCE(msk->pm.addr_signal, add_addr); 41 return 0; 42 } 43 44 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) 45 { 46 u8 rm_addr = READ_ONCE(msk->pm.addr_signal); 47 48 pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); 49 50 if (rm_addr) { 51 pr_warn("addr_signal error, rm_addr=%d", rm_addr); 52 return -EINVAL; 53 } 54 55 msk->pm.rm_list_tx = *rm_list; 56 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); 57 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 58 mptcp_pm_nl_addr_send_ack(msk); 59 return 0; 60 } 61 62 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) 63 { 64 pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); 65 66 spin_lock_bh(&msk->pm.lock); 67 mptcp_pm_nl_rm_subflow_received(msk, rm_list); 68 spin_unlock_bh(&msk->pm.lock); 69 return 0; 70 } 71 72 /* path manager event handlers */ 73 74 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) 75 { 76 struct mptcp_pm_data *pm = &msk->pm; 77 78 pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side); 79 80 WRITE_ONCE(pm->server_side, server_side); 81 mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); 82 } 83 84 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) 85 { 86 struct mptcp_pm_data *pm = &msk->pm; 87 unsigned int subflows_max; 88 int ret = 0; 89 90 subflows_max = mptcp_pm_get_subflows_max(msk); 91 92 pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows, 93 subflows_max, READ_ONCE(pm->accept_subflow)); 94 95 /* try to avoid acquiring the lock below */ 96 if (!READ_ONCE(pm->accept_subflow)) 97 return false; 98 99 spin_lock_bh(&pm->lock); 100 if (READ_ONCE(pm->accept_subflow)) { 101 ret = pm->subflows < subflows_max; 102 if (ret && ++pm->subflows == subflows_max) 103 WRITE_ONCE(pm->accept_subflow, false); 104 } 105 spin_unlock_bh(&pm->lock); 106 107 return ret; 108 } 109 110 /* return true if the new status bit is currently cleared, that is, this event 111 * can be server, eventually by an already scheduled work 112 */ 113 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, 114 enum mptcp_pm_status new_status) 115 { 116 pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status, 117 BIT(new_status)); 118 if (msk->pm.status & BIT(new_status)) 119 return false; 120 121 msk->pm.status |= BIT(new_status); 122 mptcp_schedule_work((struct sock *)msk); 123 return true; 124 } 125 126 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp) 127 { 128 struct mptcp_pm_data *pm = &msk->pm; 129 bool announce = false; 130 131 pr_debug("msk=%p", msk); 132 133 spin_lock_bh(&pm->lock); 134 135 /* mptcp_pm_fully_established() can be invoked by multiple 136 * racing paths - accept() and check_fully_established() 137 * be sure to serve this event only once. 138 */ 139 if (READ_ONCE(pm->work_pending) && 140 !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) 141 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); 142 143 if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) 144 announce = true; 145 146 msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); 147 spin_unlock_bh(&pm->lock); 148 149 if (announce) 150 mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp); 151 } 152 153 void mptcp_pm_connection_closed(struct mptcp_sock *msk) 154 { 155 pr_debug("msk=%p", msk); 156 } 157 158 void mptcp_pm_subflow_established(struct mptcp_sock *msk) 159 { 160 struct mptcp_pm_data *pm = &msk->pm; 161 162 pr_debug("msk=%p", msk); 163 164 if (!READ_ONCE(pm->work_pending)) 165 return; 166 167 spin_lock_bh(&pm->lock); 168 169 if (READ_ONCE(pm->work_pending)) 170 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 171 172 spin_unlock_bh(&pm->lock); 173 } 174 175 void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk, 176 const struct mptcp_subflow_context *subflow) 177 { 178 struct mptcp_pm_data *pm = &msk->pm; 179 bool update_subflows; 180 181 update_subflows = (ssk->sk_state == TCP_CLOSE) && 182 (subflow->request_join || subflow->mp_join); 183 if (!READ_ONCE(pm->work_pending) && !update_subflows) 184 return; 185 186 spin_lock_bh(&pm->lock); 187 if (update_subflows) 188 pm->subflows--; 189 190 /* Even if this subflow is not really established, tell the PM to try 191 * to pick the next ones, if possible. 192 */ 193 if (mptcp_pm_nl_check_work_pending(msk)) 194 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 195 196 spin_unlock_bh(&pm->lock); 197 } 198 199 void mptcp_pm_add_addr_received(struct mptcp_sock *msk, 200 const struct mptcp_addr_info *addr) 201 { 202 struct mptcp_pm_data *pm = &msk->pm; 203 204 pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id, 205 READ_ONCE(pm->accept_addr)); 206 207 mptcp_event_addr_announced(msk, addr); 208 209 spin_lock_bh(&pm->lock); 210 211 if (!READ_ONCE(pm->accept_addr)) { 212 mptcp_pm_announce_addr(msk, addr, true); 213 mptcp_pm_add_addr_send_ack(msk); 214 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { 215 pm->remote = *addr; 216 } else { 217 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); 218 } 219 220 spin_unlock_bh(&pm->lock); 221 } 222 223 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, 224 struct mptcp_addr_info *addr) 225 { 226 struct mptcp_pm_data *pm = &msk->pm; 227 228 pr_debug("msk=%p", msk); 229 230 spin_lock_bh(&pm->lock); 231 232 if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) 233 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 234 235 spin_unlock_bh(&pm->lock); 236 } 237 238 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) 239 { 240 if (!mptcp_pm_should_add_signal(msk)) 241 return; 242 243 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); 244 } 245 246 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, 247 const struct mptcp_rm_list *rm_list) 248 { 249 struct mptcp_pm_data *pm = &msk->pm; 250 u8 i; 251 252 pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr); 253 254 for (i = 0; i < rm_list->nr; i++) 255 mptcp_event_addr_removed(msk, rm_list->ids[i]); 256 257 spin_lock_bh(&pm->lock); 258 if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED)) 259 pm->rm_list_rx = *rm_list; 260 else 261 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP); 262 spin_unlock_bh(&pm->lock); 263 } 264 265 void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup) 266 { 267 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 268 269 pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); 270 subflow->backup = bkup; 271 272 mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC); 273 } 274 275 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) 276 { 277 pr_debug("fail_seq=%llu", fail_seq); 278 } 279 280 /* path manager helpers */ 281 282 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb, 283 unsigned int opt_size, unsigned int remaining, 284 struct mptcp_addr_info *addr, bool *echo, 285 bool *port, bool *drop_other_suboptions) 286 { 287 int ret = false; 288 u8 add_addr; 289 u8 family; 290 291 spin_lock_bh(&msk->pm.lock); 292 293 /* double check after the lock is acquired */ 294 if (!mptcp_pm_should_add_signal(msk)) 295 goto out_unlock; 296 297 /* always drop every other options for pure ack ADD_ADDR; this is a 298 * plain dup-ack from TCP perspective. The other MPTCP-relevant info, 299 * if any, will be carried by the 'original' TCP ack 300 */ 301 if (skb && skb_is_tcp_pure_ack(skb)) { 302 remaining += opt_size; 303 *drop_other_suboptions = true; 304 } 305 306 *echo = mptcp_pm_should_add_signal_echo(msk); 307 *port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port); 308 309 family = *echo ? msk->pm.remote.family : msk->pm.local.family; 310 if (remaining < mptcp_add_addr_len(family, *echo, *port)) 311 goto out_unlock; 312 313 if (*echo) { 314 *addr = msk->pm.remote; 315 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO); 316 } else { 317 *addr = msk->pm.local; 318 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL); 319 } 320 WRITE_ONCE(msk->pm.addr_signal, add_addr); 321 ret = true; 322 323 out_unlock: 324 spin_unlock_bh(&msk->pm.lock); 325 return ret; 326 } 327 328 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, 329 struct mptcp_rm_list *rm_list) 330 { 331 int ret = false, len; 332 u8 rm_addr; 333 334 spin_lock_bh(&msk->pm.lock); 335 336 /* double check after the lock is acquired */ 337 if (!mptcp_pm_should_rm_signal(msk)) 338 goto out_unlock; 339 340 rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL); 341 len = mptcp_rm_addr_len(&msk->pm.rm_list_tx); 342 if (len < 0) { 343 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 344 goto out_unlock; 345 } 346 if (remaining < len) 347 goto out_unlock; 348 349 *rm_list = msk->pm.rm_list_tx; 350 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 351 ret = true; 352 353 out_unlock: 354 spin_unlock_bh(&msk->pm.lock); 355 return ret; 356 } 357 358 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) 359 { 360 return mptcp_pm_nl_get_local_id(msk, skc); 361 } 362 363 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 364 { 365 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 366 u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp); 367 368 /* keep track of rtx periods with no progress */ 369 if (!subflow->stale_count) { 370 subflow->stale_rcv_tstamp = rcv_tstamp; 371 subflow->stale_count++; 372 } else if (subflow->stale_rcv_tstamp == rcv_tstamp) { 373 if (subflow->stale_count < U8_MAX) 374 subflow->stale_count++; 375 mptcp_pm_nl_subflow_chk_stale(msk, ssk); 376 } else { 377 subflow->stale_count = 0; 378 mptcp_subflow_set_active(subflow); 379 } 380 } 381 382 void mptcp_pm_data_reset(struct mptcp_sock *msk) 383 { 384 msk->pm.add_addr_signaled = 0; 385 msk->pm.add_addr_accepted = 0; 386 msk->pm.local_addr_used = 0; 387 msk->pm.subflows = 0; 388 msk->pm.rm_list_tx.nr = 0; 389 msk->pm.rm_list_rx.nr = 0; 390 WRITE_ONCE(msk->pm.work_pending, false); 391 WRITE_ONCE(msk->pm.addr_signal, 0); 392 WRITE_ONCE(msk->pm.accept_addr, false); 393 WRITE_ONCE(msk->pm.accept_subflow, false); 394 WRITE_ONCE(msk->pm.remote_deny_join_id0, false); 395 msk->pm.status = 0; 396 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 397 398 mptcp_pm_nl_data_init(msk); 399 } 400 401 void mptcp_pm_data_init(struct mptcp_sock *msk) 402 { 403 spin_lock_init(&msk->pm.lock); 404 INIT_LIST_HEAD(&msk->pm.anno_list); 405 mptcp_pm_data_reset(msk); 406 } 407 408 void __init mptcp_pm_init(void) 409 { 410 mptcp_pm_nl_init(); 411 } 412