// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.add_addr_signal);

	pr_debug("msk=%p, local_id=%d\n", msk, addr->id);

	msk->pm.local = *addr;
	add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	WRITE_ONCE(msk->pm.add_addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	pr_debug("msk=%p, local_id=%d\n", msk, local_id);

	msk->pm.rm_id = local_id;
	WRITE_ONCE(msk->pm.rm_addr_signal, true);
	return 0;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
	pr_debug("msk=%p, local_id=%d\n", msk, local_id);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, local_id);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret = false;

	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < pm->subflows_max;
		if (ret && ++pm->subflows == pm->subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p\n", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
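
/* The pattern in mptcp_pm_schedule_work() above -- set a status bit and
 * schedule the worker only when the bit was previously clear -- guarantees
 * at most one pending work item per event type. A minimal userspace sketch
 * of the same idea, with printf() standing in for mptcp_schedule_work()
 * (illustrative only, not kernel code):
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define BIT(nr)	(1UL << (nr))
 *
 *	enum pm_status { PM_ESTABLISHED, PM_SUBFLOW_ESTABLISHED };
 *
 *	static unsigned long status;
 *
 *	static bool schedule_once(enum pm_status new_status)
 *	{
 *		if (status & BIT(new_status))
 *			return false;	// worker already pending for this event
 *		status |= BIT(new_status);
 *		printf("scheduling worker for event %d\n", new_status);
 *		return true;
 *	}
 *
 *	int main(void)
 *	{
 *		schedule_once(PM_ESTABLISHED);	// schedules the worker
 *		schedule_once(PM_ESTABLISHED);	// no-op: bit already set
 *		return 0;
 *	}
 *
 * In the kernel, the worker clears the bit under msk->pm.lock before
 * servicing the event, re-arming it for the next occurrence.
 */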

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p\n", msk);
}

void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		/* not accepting new addresses: only echo the announcement */
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal_ipv6(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d\n", msk, rm_id);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_id = rm_id;
	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	*echo = mptcp_pm_should_add_signal_echo(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.add_addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
		goto out_unlock;

	*rm_id = msk->pm.rm_id;
	WRITE_ONCE(msk->pm.rm_addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.add_addr_signal, 0);
	WRITE_ONCE(msk->pm.rm_addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
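
/* Example: lifecycle of the add_addr_signal bits. mptcp_pm_announce_addr()
 * publishes the SIGNAL/ECHO/IPV6 flags, and mptcp_pm_add_addr_signal()
 * consumes them when building the TCP option, but only if the remaining
 * option space fits the announcement. A userspace sketch with made-up
 * option lengths (the real sizes come from mptcp_add_addr_len() in
 * protocol.h); illustrative only, not kernel code:
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define BIT(nr)		(1U << (nr))
 *	#define F_SIGNAL	BIT(0)
 *	#define F_ECHO		BIT(1)
 *	#define F_IPV6		BIT(2)
 *
 *	static unsigned int add_addr_signal;
 *
 *	static void announce(bool echo, bool ipv6)
 *	{
 *		unsigned int flags = add_addr_signal | F_SIGNAL;
 *
 *		if (echo)
 *			flags |= F_ECHO;
 *		if (ipv6)
 *			flags |= F_IPV6;
 *		add_addr_signal = flags;
 *	}
 *
 *	static bool consume(unsigned int remaining)
 *	{
 *		// hypothetical lengths for the ADD_ADDR option
 *		unsigned int len = (add_addr_signal & F_IPV6) ? 28 : 16;
 *
 *		if (add_addr_signal & F_ECHO)
 *			len -= 8;	// hypothetical: echoes carry no HMAC
 *		if (!(add_addr_signal & F_SIGNAL) || remaining < len)
 *			return false;
 *		add_addr_signal = 0;	// one-shot: cleared once sent
 *		return true;
 *	}
 *
 *	int main(void)
 *	{
 *		announce(false, true);
 *		printf("fits in 12 bytes: %d\n", consume(12));	// 0: too small
 *		printf("fits in 40 bytes: %d\n", consume(40));	// 1: consumed
 *		return 0;
 *	}
 */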