// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

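/* Queue an ADD_ADDR (or its echo) for transmission with a later packet:
 * the relevant address is stored in the pm context and the matching bit is
 * set in pm.addr_signal. Returns -EINVAL if a signal of the same kind is
 * already pending. Must be called with the pm spinlock held.
 */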
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

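/* Queue an RM_ADDR signal for the given list of address ids and trigger an
 * ack transmission to carry it. Returns -EINVAL if another address signal
 * is still pending.
 */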
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

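/* Close the subflows matching the given list of local address ids, reusing
 * the RM_ADDR-received handling under the pm spinlock.
 */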
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

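/* Called when a new MPTCP connection is created: record which side this
 * peer is on and emit an MPTCP_EVENT_CREATED notification for the PM
 * netlink listeners.
 */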
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

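/* Decide whether an incoming MP_JOIN may create a new subflow: the
 * userspace PM delegates the decision, while the in-kernel PM checks and
 * bumps the subflow counter against the configured limit under the pm
 * spinlock, clearing accept_subflow once the limit is reached.
 */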
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_active(msk);

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

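/* Called when the first subflow becomes fully established: schedule the
 * PM worker if it has pending work, and emit a single
 * MPTCP_EVENT_ESTABLISHED notification even on racing invocations.
 */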
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths, accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

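/* A subflow completed the join handshake: wake up the PM worker so that it
 * can try to create additional subflows, if work is still pending.
 */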
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

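/* Called from the subflow close path: update the in-kernel PM subflow
 * accounting and, even if the closed subflow was never fully established,
 * let the PM try to pick the next addresses/subflows, when possible.
 */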
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = (subflow->request_join || subflow->mp_join) &&
			  mptcp_pm_is_kernel(msk);
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

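/* Handle an incoming ADD_ADDR option: notify the netlink listeners, then
 * either echo the address, hand it over to the PM worker for subflow
 * creation, or drop it, bumping the related MIB counter on drop.
 */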
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

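/* Handle an incoming RM_ADDR option: notify the netlink listeners about
 * each removed id and pass the list to the PM worker; account a drop if a
 * previously received list is still being processed.
 */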
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

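/* Handle an incoming MP_PRIO option: update the subflow backup flag, make
 * the packet scheduler re-evaluate its subflow choice and notify the
 * netlink listeners about the priority change.
 */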
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup) {
		subflow->backup = bkup;
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			msk->last_snd = NULL;
		else
			__set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
		mptcp_data_unlock(sk);
	}

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

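/* Handle an incoming MP_FAIL: when infinite fallback is still possible,
 * either reply with MP_FAIL and switch to an infinite mapping, or, if this
 * is the expected MP_FAIL response, stop the related timeout timer.
 */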
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *s = (struct sock *)msk;

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!READ_ONCE(subflow->mp_fail_response_expect)) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
		subflow->send_infinite_map = 1;
	} else if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("MP_FAIL response received");

		sk_stop_timer(s, &s->sk_timer);
	}
}

/* path manager helpers */

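/* Copy the pending ADD_ADDR (or echo) into @addr if it fits the remaining
 * option space, clearing the corresponding signal bit; on a pure ack all
 * other suboptions can be dropped to make room. Returns true when the
 * ADD_ADDR option should actually be sent.
 */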
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	bool ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all other options for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

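/* Copy the pending RM_ADDR list into @rm_list if it fits the remaining
 * option space, clearing the signal bit. Returns true when the RM_ADDR
 * option should actually be sent.
 */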
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	bool ret = false;
	int len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

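/* Track retransmit periods with no forward progress: if the subflow rcv
 * timestamp does not move between consecutive checks, let the in-kernel PM
 * decide whether the subflow should be marked as stale.
 */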
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

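/* Reset the PM status for a new or restarting connection, deriving
 * work_pending, accept_addr and accept_subflow from the netns limits when
 * the in-kernel PM is in use.
 */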
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}