// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	if (add_addr) {
		pr_warn("addr_signal error, add_addr=%d", add_addr);
		return -EINVAL;
	}

	msk->pm.local = *addr;
	add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (port)
		add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, local_id);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_id = local_id;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	return 0;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
	pr_debug("msk=%p, local_id=%d", msk, local_id);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, local_id);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}
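
/* Note (illustrative, not kernel documentation): the command handlers above
 * only stage a pending ADD_ADDR/RM_ADDR signal in msk->pm.addr_signal;
 * nothing is transmitted from here.  The option-writing path is expected to
 * drain the signal via mptcp_pm_add_addr_signal()/mptcp_pm_rm_addr_signal()
 * below, roughly:
 *
 *	struct mptcp_addr_info saddr;
 *	bool echo, port;
 *
 *	if (mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port))
 *		... build an ADD_ADDR option from saddr/echo/port ...
 */
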
/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret = false;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < pm->subflows_max;
		if (ret && ++pm->subflows == pm->subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this
 * event can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established();
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true, addr->port);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal_ipv6(msk) &&
	    !mptcp_pm_should_add_signal_port(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d", msk, rm_id);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_id = rm_id;
	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo,
			      bool *port)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = mptcp_pm_should_add_signal_port(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
		goto out_unlock;

	*rm_id = msk->pm.rm_id;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
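
/* Reset all per-connection path manager state.  This is expected to run
 * before any PM event can fire (in this kernel series, from the msk init
 * path, __mptcp_init_sock()), so plain stores suffice here except for the
 * WRITE_ONCE() fields, which have lockless readers elsewhere.
 */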
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
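
/* Note (illustrative): mptcp_pm_init() runs once at boot (hence __init),
 * while mptcp_pm_data_init() runs per connection.  The MPTCP_PM_* status
 * bits set by the event handlers above are consumed asynchronously by the
 * msk worker kicked via mptcp_schedule_work(); a consumer tests and clears
 * each bit under the pm lock, roughly:
 *
 *	spin_lock_bh(&msk->pm.lock);
 *	if (msk->pm.status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
 *		msk->pm.status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
 *		... act on the address stored in msk->pm.remote ...
 *	}
 *	spin_unlock_bh(&msk->pm.lock);
 */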