// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr) {
		pr_warn("addr_signal error, add_addr=%d", add_addr);
		return -EINVAL;
	}

	msk->pm.local = *addr;
	add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (port)
		add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	return 0;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
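
/* Illustrative sketch, not in-tree code: the inverse of
 * mptcp_pm_schedule_work() as a worker-side consumer could run it. The
 * status bit is tested and cleared under pm.lock before the event is
 * served, so events signalled while the work was already scheduled are
 * folded into a single service pass. The helper name is hypothetical.
 */
static inline bool example_pm_take_event(struct mptcp_sock *msk,
					 enum mptcp_pm_status status)
{
	bool pending;

	spin_lock_bh(&msk->pm.lock);
	pending = msk->pm.status & BIT(status);
	msk->pm.status &= ~BIT(status);
	spin_unlock_bh(&msk->pm.lock);

	return pending;
}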

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established() -
	 * so be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(msk, addr);

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true, addr->port);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_list_rx = *rm_list;
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
}
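
/* Illustrative sketch, not the in-tree caller: how option-writing code is
 * expected to drain the signals latched above, using the helpers defined
 * below (declared in protocol.h). 'remaining' stands for the TCP option
 * space still available in the current packet; the actual emit steps are
 * elided. Function and variable names here are hypothetical.
 */
static inline void example_pm_write_options(struct mptcp_sock *msk,
					    unsigned int remaining)
{
	struct mptcp_addr_info saddr;
	struct mptcp_rm_list rm_list;
	bool echo, port;

	if (mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port)) {
		/* enough space: emit an ADD_ADDR option for saddr, with
		 * the echo/port flags reflected in the suboption
		 */
	} else if (mptcp_pm_rm_addr_signal(msk, remaining, &rm_list)) {
		/* emit an RM_ADDR option carrying rm_list.ids[0..nr-1] */
	}
}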

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo, bool *port)
{
	int ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = mptcp_pm_should_add_signal_port(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, 0);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_list_tx.nr = 0;
	msk->pm.rm_list_rx.nr = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
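
/* Illustrative sketch, not in-tree code: the invariants a freshly
 * initialized path manager (see mptcp_pm_data_init() above) is expected
 * to satisfy - no signal pending, no event scheduled, no extra subflow
 * accounted. The helper name is hypothetical.
 */
static inline bool example_pm_is_idle(const struct mptcp_sock *msk)
{
	return !READ_ONCE(msk->pm.addr_signal) &&	/* no ADD/RM pending */
	       !msk->pm.status &&			/* no scheduled event */
	       !msk->pm.subflows;			/* no extra subflows */
}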