xref: /openbmc/linux/net/mptcp/subflow.c (revision 4e32f599)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/algapi.h>
13 #include <crypto/sha2.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
20 #include <net/ip6_route.h>
21 #include <net/transp_v6.h>
22 #endif
23 #include <net/mptcp.h>
24 #include <uapi/linux/mptcp.h>
25 #include "protocol.h"
26 #include "mib.h"
27 
28 #include <trace/events/mptcp.h>
29 #include <trace/events/sock.h>
30 
31 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
32 
33 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
34 				  enum linux_mptcp_mib_field field)
35 {
36 	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
37 }
38 
39 static void subflow_req_destructor(struct request_sock *req)
40 {
41 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
42 
43 	pr_debug("subflow_req=%p", subflow_req);
44 
45 	if (subflow_req->msk)
46 		sock_put((struct sock *)subflow_req->msk);
47 
48 	mptcp_token_destroy_request(req);
49 }
50 
51 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
52 				  void *hmac)
53 {
54 	u8 msg[8];
55 
56 	put_unaligned_be32(nonce1, &msg[0]);
57 	put_unaligned_be32(nonce2, &msg[4]);
58 
59 	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
60 }
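
/* A sketch of the two MP_JOIN HMACs built on top of the helper above,
 * per RFC 8684 section 3.2, with the key/nonce roles as used by the
 * callers in this file:
 *
 *   thmac (in the SYN/ACK) = leftmost 64 bits of
 *          HMAC(key = Key-B || Key-A, msg = R-B || R-A)
 *   hmac  (in the 3rd ACK) = leftmost 160 bits of
 *          HMAC(key = Key-A || Key-B, msg = R-A || R-B)
 *
 * i.e. each peer passes its own key/nonce first when generating its
 * HMAC and the remote ones first when validating the peer's.
 */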
61 
62 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
63 {
64 	return mptcp_is_fully_established((void *)msk) &&
65 		((mptcp_pm_is_userspace(msk) &&
66 		  mptcp_userspace_pm_active(msk)) ||
67 		 READ_ONCE(msk->pm.accept_subflow));
68 }
69 
70 /* create local nonce and truncated hmac for the SYN-ACK; the received token is validated by subflow_token_join_request() */
71 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
72 {
73 	struct mptcp_sock *msk = subflow_req->msk;
74 	u8 hmac[SHA256_DIGEST_SIZE];
75 
76 	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
77 
78 	subflow_generate_hmac(msk->local_key, msk->remote_key,
79 			      subflow_req->local_nonce,
80 			      subflow_req->remote_nonce, hmac);
81 
82 	subflow_req->thmac = get_unaligned_be64(hmac);
83 }
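
/* Only the leftmost 64 bits of the SHA-256 HMAC fit in the MP_JOIN
 * SYN/ACK option, hence the single get_unaligned_be64() above; the
 * initiator's HMAC in the third ACK is longer (MPTCPOPT_HMAC_LEN
 * bytes, see subflow_finish_connect()).
 */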
84 
85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
86 {
87 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
88 	struct mptcp_sock *msk;
89 	int local_id;
90 
91 	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 	if (!msk) {
93 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
94 		return NULL;
95 	}
96 
97 	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
98 	if (local_id < 0) {
99 		sock_put((struct sock *)msk);
100 		return NULL;
101 	}
102 	subflow_req->local_id = local_id;
103 
104 	return msk;
105 }
106 
107 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
108 {
109 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
110 
111 	subflow_req->mp_capable = 0;
112 	subflow_req->mp_join = 0;
113 	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
114 	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
115 	subflow_req->msk = NULL;
116 	mptcp_token_init_request(req);
117 }
118 
119 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
120 {
121 	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
122 }
123 
124 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
125 {
126 	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
127 
128 	if (mpext) {
129 		memset(mpext, 0, sizeof(*mpext));
130 		mpext->reset_reason = reason;
131 	}
132 }
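
/* The reason code travels as an MPTCP skb extension, so that the TCP
 * reset path can later turn it into an MP_TCPRST option on the
 * outgoing RST; the code reading it back lives outside this file.
 */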
133 
134 /* Init mptcp request socket.
135  *
136  * Returns an error code if a JOIN has failed and a TCP reset
137  * should be sent.
138  */
139 static int subflow_check_req(struct request_sock *req,
140 			     const struct sock *sk_listener,
141 			     struct sk_buff *skb)
142 {
143 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
144 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
145 	struct mptcp_options_received mp_opt;
146 	bool opt_mp_capable, opt_mp_join;
147 
148 	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
149 
150 #ifdef CONFIG_TCP_MD5SIG
151 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
152 	 * TCP option space.
153 	 */
154 	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
155 		return -EINVAL;
156 #endif
157 
158 	mptcp_get_options(skb, &mp_opt);
159 
160 	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
161 	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
162 	if (opt_mp_capable) {
163 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
164 
165 		if (opt_mp_join)
166 			return 0;
167 	} else if (opt_mp_join) {
168 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
169 	}
170 
171 	if (opt_mp_capable && listener->request_mptcp) {
172 		int err, retries = MPTCP_TOKEN_MAX_RETRIES;
173 
174 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
175 again:
176 		do {
177 			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
178 		} while (subflow_req->local_key == 0);
179 
180 		if (unlikely(req->syncookie)) {
181 			mptcp_crypto_key_sha(subflow_req->local_key,
182 					     &subflow_req->token,
183 					     &subflow_req->idsn);
184 			if (mptcp_token_exists(subflow_req->token)) {
185 				if (retries-- > 0)
186 					goto again;
187 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
188 			} else {
189 				subflow_req->mp_capable = 1;
190 			}
191 			return 0;
192 		}
193 
194 		err = mptcp_token_new_request(req);
195 		if (err == 0)
196 			subflow_req->mp_capable = 1;
197 		else if (retries-- > 0)
198 			goto again;
199 		else
200 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
201 
202 	} else if (opt_mp_join && listener->request_mptcp) {
203 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
204 		subflow_req->mp_join = 1;
205 		subflow_req->backup = mp_opt.backup;
206 		subflow_req->remote_id = mp_opt.join_id;
207 		subflow_req->token = mp_opt.token;
208 		subflow_req->remote_nonce = mp_opt.nonce;
209 		subflow_req->msk = subflow_token_join_request(req);
210 
211 		/* Can't fall back to TCP in this case. */
212 		if (!subflow_req->msk) {
213 			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
214 			return -EPERM;
215 		}
216 
217 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
218 			pr_debug("syn inet_sport=%d %d",
219 				 ntohs(inet_sk(sk_listener)->inet_sport),
220 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
221 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
222 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
223 				return -EPERM;
224 			}
225 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
226 		}
227 
228 		subflow_req_create_thmac(subflow_req);
229 
230 		if (unlikely(req->syncookie)) {
231 			if (mptcp_can_accept_new_subflow(subflow_req->msk))
232 				subflow_init_req_cookie_join_save(subflow_req, skb);
233 			else
234 				return -EPERM;
235 		}
236 
237 		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
238 			 subflow_req->remote_nonce, subflow_req->msk);
239 	}
240 
241 	return 0;
242 }
243 
244 int mptcp_subflow_init_cookie_req(struct request_sock *req,
245 				  const struct sock *sk_listener,
246 				  struct sk_buff *skb)
247 {
248 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
249 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
250 	struct mptcp_options_received mp_opt;
251 	bool opt_mp_capable, opt_mp_join;
252 	int err;
253 
254 	subflow_init_req(req, sk_listener);
255 	mptcp_get_options(skb, &mp_opt);
256 
257 	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
258 	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
259 	if (opt_mp_capable && opt_mp_join)
260 		return -EINVAL;
261 
262 	if (opt_mp_capable && listener->request_mptcp) {
263 		if (mp_opt.sndr_key == 0)
264 			return -EINVAL;
265 
266 		subflow_req->local_key = mp_opt.rcvr_key;
267 		err = mptcp_token_new_request(req);
268 		if (err)
269 			return err;
270 
271 		subflow_req->mp_capable = 1;
272 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
273 	} else if (opt_mp_join && listener->request_mptcp) {
274 		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
275 			return -EINVAL;
276 
277 		subflow_req->mp_join = 1;
278 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
279 	}
280 
281 	return 0;
282 }
283 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
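
/* Note on the ssn_offset math above: with syncookies the request sock
 * is rebuilt from the third ACK, whose TCP sequence number is ISN + 1
 * (the SYN consumed one unit of sequence space), so 'seq - 1' recovers
 * the same offset that subflow_check_req() reads straight from the SYN.
 */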
284 
285 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
286 					      struct sk_buff *skb,
287 					      struct flowi *fl,
288 					      struct request_sock *req)
289 {
290 	struct dst_entry *dst;
291 	int err;
292 
293 	tcp_rsk(req)->is_mptcp = 1;
294 	subflow_init_req(req, sk);
295 
296 	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
297 	if (!dst)
298 		return NULL;
299 
300 	err = subflow_check_req(req, sk, skb);
301 	if (err == 0)
302 		return dst;
303 
304 	dst_release(dst);
305 	if (!req->syncookie)
306 		tcp_request_sock_ops.send_reset(sk, skb);
307 	return NULL;
308 }
309 
310 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
311 				struct tcp_fastopen_cookie *foc,
312 				enum tcp_synack_type synack_type)
313 {
314 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
315 	struct inet_request_sock *ireq = inet_rsk(req);
316 
317 	/* clear tstamp_ok, as needed depending on cookie */
318 	if (foc && foc->len > -1)
319 		ireq->tstamp_ok = 0;
320 
321 	if (synack_type == TCP_SYNACK_FASTOPEN)
322 		mptcp_fastopen_subflow_synack_set_params(subflow, req);
323 }
324 
325 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
326 				  struct flowi *fl,
327 				  struct request_sock *req,
328 				  struct tcp_fastopen_cookie *foc,
329 				  enum tcp_synack_type synack_type,
330 				  struct sk_buff *syn_skb)
331 {
332 	subflow_prep_synack(sk, req, foc, synack_type);
333 
334 	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
335 						     synack_type, syn_skb);
336 }
337 
338 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
339 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
340 				  struct flowi *fl,
341 				  struct request_sock *req,
342 				  struct tcp_fastopen_cookie *foc,
343 				  enum tcp_synack_type synack_type,
344 				  struct sk_buff *syn_skb)
345 {
346 	subflow_prep_synack(sk, req, foc, synack_type);
347 
348 	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
349 						     synack_type, syn_skb);
350 }
351 
352 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
353 					      struct sk_buff *skb,
354 					      struct flowi *fl,
355 					      struct request_sock *req)
356 {
357 	struct dst_entry *dst;
358 	int err;
359 
360 	tcp_rsk(req)->is_mptcp = 1;
361 	subflow_init_req(req, sk);
362 
363 	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
364 	if (!dst)
365 		return NULL;
366 
367 	err = subflow_check_req(req, sk, skb);
368 	if (err == 0)
369 		return dst;
370 
371 	dst_release(dst);
372 	if (!req->syncookie)
373 		tcp6_request_sock_ops.send_reset(sk, skb);
374 	return NULL;
375 }
376 #endif
377 
378 /* validate received truncated hmac and create hmac for third ACK */
379 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
380 {
381 	u8 hmac[SHA256_DIGEST_SIZE];
382 	u64 thmac;
383 
384 	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
385 			      subflow->remote_nonce, subflow->local_nonce,
386 			      hmac);
387 
388 	thmac = get_unaligned_be64(hmac);
389 	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
390 		 subflow, subflow->token, thmac, subflow->thmac);
391 
392 	return thmac == subflow->thmac;
393 }
394 
395 void mptcp_subflow_reset(struct sock *ssk)
396 {
397 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
398 	struct sock *sk = subflow->conn;
399 
400 	/* mptcp_mp_fail_no_response() can reach here on an already closed
401 	 * socket
402 	 */
403 	if (ssk->sk_state == TCP_CLOSE)
404 		return;
405 
406 	/* must hold: tcp_done() could drop last reference on parent */
407 	sock_hold(sk);
408 
409 	tcp_send_active_reset(ssk, GFP_ATOMIC);
410 	tcp_done(ssk);
411 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
412 		mptcp_schedule_work(sk);
413 
414 	sock_put(sk);
415 }
416 
417 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
418 {
419 	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
420 }
421 
422 void __mptcp_sync_state(struct sock *sk, int state)
423 {
424 	struct mptcp_sock *msk = mptcp_sk(sk);
425 
426 	__mptcp_propagate_sndbuf(sk, msk->first);
427 	if (sk->sk_state == TCP_SYN_SENT) {
428 		inet_sk_state_store(sk, state);
429 		sk->sk_state_change(sk);
430 	}
431 }
432 
433 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk)
434 {
435 	struct mptcp_sock *msk = mptcp_sk(sk);
436 
437 	mptcp_data_lock(sk);
438 	if (!sock_owned_by_user(sk)) {
439 		__mptcp_sync_state(sk, ssk->sk_state);
440 	} else {
441 		msk->pending_state = ssk->sk_state;
442 		__set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
443 	}
444 	mptcp_data_unlock(sk);
445 }
446 
447 static void subflow_set_remote_key(struct mptcp_sock *msk,
448 				   struct mptcp_subflow_context *subflow,
449 				   const struct mptcp_options_received *mp_opt)
450 {
451 	/* active MPC subflow will reach here multiple times:
452 	 * at subflow_finish_connect() time and at 4th ack time
453 	 */
454 	if (subflow->remote_key_valid)
455 		return;
456 
457 	subflow->remote_key_valid = 1;
458 	subflow->remote_key = mp_opt->sndr_key;
459 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
460 	subflow->iasn++;
461 
462 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
463 	WRITE_ONCE(msk->ack_seq, subflow->iasn);
464 	WRITE_ONCE(msk->can_ack, true);
465 	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
466 }
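
/* Per RFC 8684 section 3.1 the IDSN is the least significant 64 bits
 * of SHA-256(key) - that is what mptcp_crypto_key_sha() stores in
 * 'iasn' above - and the MP_CAPABLE SYN occupies the first octet of
 * data-sequence space, hence the extra increment before seeding
 * msk->ack_seq.
 */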
467 
468 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
469 {
470 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
471 	struct mptcp_options_received mp_opt;
472 	struct sock *parent = subflow->conn;
473 	struct mptcp_sock *msk;
474 
475 	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
476 
477 	/* be sure no special action on any packet other than syn-ack */
478 	if (subflow->conn_finished)
479 		return;
480 
481 	msk = mptcp_sk(parent);
482 	subflow->rel_write_seq = 1;
483 	subflow->conn_finished = 1;
484 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
485 	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
486 
487 	mptcp_get_options(skb, &mp_opt);
488 	if (subflow->request_mptcp) {
489 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
490 			MPTCP_INC_STATS(sock_net(sk),
491 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
492 			mptcp_do_fallback(sk);
493 			pr_fallback(msk);
494 			goto fallback;
495 		}
496 
497 		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
498 			WRITE_ONCE(msk->csum_enabled, true);
499 		if (mp_opt.deny_join_id0)
500 			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
501 		subflow->mp_capable = 1;
502 		subflow_set_remote_key(msk, subflow, &mp_opt);
503 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
504 		mptcp_finish_connect(sk);
505 		mptcp_propagate_state(parent, sk);
506 	} else if (subflow->request_join) {
507 		u8 hmac[SHA256_DIGEST_SIZE];
508 
509 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
510 			subflow->reset_reason = MPTCP_RST_EMPTCP;
511 			goto do_reset;
512 		}
513 
514 		subflow->backup = mp_opt.backup;
515 		subflow->thmac = mp_opt.thmac;
516 		subflow->remote_nonce = mp_opt.nonce;
517 		subflow->remote_id = mp_opt.join_id;
518 		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
519 			 subflow, subflow->thmac, subflow->remote_nonce,
520 			 subflow->backup);
521 
522 		if (!subflow_thmac_valid(subflow)) {
523 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
524 			subflow->reset_reason = MPTCP_RST_EMPTCP;
525 			goto do_reset;
526 		}
527 
528 		if (!mptcp_finish_join(sk))
529 			goto do_reset;
530 
531 		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
532 				      subflow->local_nonce,
533 				      subflow->remote_nonce,
534 				      hmac);
535 		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
536 
537 		subflow->mp_join = 1;
538 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
539 
540 		if (subflow_use_different_dport(msk, sk)) {
541 			pr_debug("synack inet_dport=%d %d",
542 				 ntohs(inet_sk(sk)->inet_dport),
543 				 ntohs(inet_sk(parent)->inet_dport));
544 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
545 		}
546 	} else if (mptcp_check_fallback(sk)) {
547 fallback:
548 		mptcp_rcv_space_init(msk, sk);
549 		mptcp_propagate_state(parent, sk);
550 	}
551 	return;
552 
553 do_reset:
554 	subflow->reset_transient = 0;
555 	mptcp_subflow_reset(sk);
556 }
557 
558 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
559 {
560 	subflow->local_id = local_id;
561 	subflow->local_id_valid = 1;
562 }
563 
564 static int subflow_chk_local_id(struct sock *sk)
565 {
566 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
567 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
568 	int err;
569 
570 	if (likely(subflow->local_id_valid))
571 		return 0;
572 
573 	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
574 	if (err < 0)
575 		return err;
576 
577 	subflow_set_local_id(subflow, err);
578 	return 0;
579 }
580 
581 static int subflow_rebuild_header(struct sock *sk)
582 {
583 	int err = subflow_chk_local_id(sk);
584 
585 	if (unlikely(err < 0))
586 		return err;
587 
588 	return inet_sk_rebuild_header(sk);
589 }
590 
591 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
592 static int subflow_v6_rebuild_header(struct sock *sk)
593 {
594 	int err = subflow_chk_local_id(sk);
595 
596 	if (unlikely(err < 0))
597 		return err;
598 
599 	return inet6_sk_rebuild_header(sk);
600 }
601 #endif
602 
603 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
604 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
605 
606 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
607 {
608 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
609 
610 	pr_debug("subflow=%p", subflow);
611 
612 	/* Never answer SYNs sent to broadcast or multicast addresses */
613 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
614 		goto drop;
615 
616 	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
617 				&subflow_request_sock_ipv4_ops,
618 				sk, skb);
619 drop:
620 	tcp_listendrop(sk);
621 	return 0;
622 }
623 
624 static void subflow_v4_req_destructor(struct request_sock *req)
625 {
626 	subflow_req_destructor(req);
627 	tcp_request_sock_ops.destructor(req);
628 }
629 
630 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
631 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
632 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
633 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
634 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
635 static struct proto tcpv6_prot_override __ro_after_init;
636 
637 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
638 {
639 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
640 
641 	pr_debug("subflow=%p", subflow);
642 
643 	if (skb->protocol == htons(ETH_P_IP))
644 		return subflow_v4_conn_request(sk, skb);
645 
646 	if (!ipv6_unicast_destination(skb))
647 		goto drop;
648 
649 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
650 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
651 		return 0;
652 	}
653 
654 	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
655 				&subflow_request_sock_ipv6_ops, sk, skb);
656 
657 drop:
658 	tcp_listendrop(sk);
659 	return 0; /* don't send reset */
660 }
661 
662 static void subflow_v6_req_destructor(struct request_sock *req)
663 {
664 	subflow_req_destructor(req);
665 	tcp6_request_sock_ops.destructor(req);
666 }
667 #endif
668 
669 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
670 					       struct sock *sk_listener,
671 					       bool attach_listener)
672 {
673 	if (ops->family == AF_INET)
674 		ops = &mptcp_subflow_v4_request_sock_ops;
675 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
676 	else if (ops->family == AF_INET6)
677 		ops = &mptcp_subflow_v6_request_sock_ops;
678 #endif
679 
680 	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
681 }
682 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
683 
684 /* validate hmac received in third ACK */
685 static bool subflow_hmac_valid(const struct request_sock *req,
686 			       const struct mptcp_options_received *mp_opt)
687 {
688 	const struct mptcp_subflow_request_sock *subflow_req;
689 	u8 hmac[SHA256_DIGEST_SIZE];
690 	struct mptcp_sock *msk;
691 
692 	subflow_req = mptcp_subflow_rsk(req);
693 	msk = subflow_req->msk;
694 	if (!msk)
695 		return false;
696 
697 	subflow_generate_hmac(msk->remote_key, msk->local_key,
698 			      subflow_req->remote_nonce,
699 			      subflow_req->local_nonce, hmac);
700 
701 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
702 }
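
/* crypto_memneq() is used instead of memcmp() on purpose: it runs in
 * constant time, avoiding a timing side channel on the HMAC check.
 */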
703 
704 static void subflow_ulp_fallback(struct sock *sk,
705 				 struct mptcp_subflow_context *old_ctx)
706 {
707 	struct inet_connection_sock *icsk = inet_csk(sk);
708 
709 	mptcp_subflow_tcp_fallback(sk, old_ctx);
710 	icsk->icsk_ulp_ops = NULL;
711 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
712 	tcp_sk(sk)->is_mptcp = 0;
713 
714 	mptcp_subflow_ops_undo_override(sk);
715 }
716 
717 void mptcp_subflow_drop_ctx(struct sock *ssk)
718 {
719 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
720 
721 	if (!ctx)
722 		return;
723 
724 	list_del(&mptcp_subflow_ctx(ssk)->node);
725 	if (inet_csk(ssk)->icsk_ulp_ops) {
726 		subflow_ulp_fallback(ssk, ctx);
727 		if (ctx->conn)
728 			sock_put(ctx->conn);
729 	}
730 
731 	kfree_rcu(ctx, rcu);
732 }
733 
734 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
735 				     const struct mptcp_options_received *mp_opt)
736 {
737 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
738 
739 	subflow_set_remote_key(msk, subflow, mp_opt);
740 	subflow->fully_established = 1;
741 	WRITE_ONCE(msk->fully_established, true);
742 
743 	if (subflow->is_mptfo)
744 		mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
745 }
746 
747 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
748 					  struct sk_buff *skb,
749 					  struct request_sock *req,
750 					  struct dst_entry *dst,
751 					  struct request_sock *req_unhash,
752 					  bool *own_req)
753 {
754 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
755 	struct mptcp_subflow_request_sock *subflow_req;
756 	struct mptcp_options_received mp_opt;
757 	bool fallback, fallback_is_fatal;
758 	struct mptcp_sock *owner;
759 	struct sock *child;
760 
761 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
762 
763 	/* After child creation we must look for MPC even when options
764 	 * are not parsed
765 	 */
766 	mp_opt.suboptions = 0;
767 
768 	/* hopefully temporary handling for MP_JOIN+syncookie */
769 	subflow_req = mptcp_subflow_rsk(req);
770 	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
771 	fallback = !tcp_rsk(req)->is_mptcp;
772 	if (fallback)
773 		goto create_child;
774 
775 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
776 	if (subflow_req->mp_capable) {
777 		/* we can receive and accept an in-window, out-of-order pkt,
778 		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
779 		 * paths: always try to extract the peer key, and fallback
780 		 * for packets missing it.
781 		 * Even OoO DSS packets coming legitimately after dropped or
782 		 * reordered MPC will cause fallback, but we don't have other
783 		 * options.
784 		 */
785 		mptcp_get_options(skb, &mp_opt);
786 		if (!(mp_opt.suboptions &
787 		      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
788 			fallback = true;
789 
790 	} else if (subflow_req->mp_join) {
791 		mptcp_get_options(skb, &mp_opt);
792 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
793 		    !subflow_hmac_valid(req, &mp_opt) ||
794 		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
795 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
796 			fallback = true;
797 		}
798 	}
799 
800 create_child:
801 	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
802 						     req_unhash, own_req);
803 
804 	if (child && *own_req) {
805 		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
806 
807 		tcp_rsk(req)->drop_req = false;
808 
809 		/* we need to fall back on ctx allocation failure and on the
810 		 * pre-reqs checks above. In the latter scenario we additionally
811 		 * need to reset the context to non-MPTCP status.
812 		 */
813 		if (!ctx || fallback) {
814 			if (fallback_is_fatal) {
815 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
816 				goto dispose_child;
817 			}
818 			goto fallback;
819 		}
820 
821 		/* ssk inherits options of listener sk */
822 		ctx->setsockopt_seq = listener->setsockopt_seq;
823 
824 		if (ctx->mp_capable) {
825 			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
826 			if (!ctx->conn)
827 				goto fallback;
828 
829 			ctx->subflow_id = 1;
830 			owner = mptcp_sk(ctx->conn);
831 			mptcp_pm_new_connection(owner, child, 1);
832 
833 			/* with OoO packets we can reach here without ingress
834 			 * mpc option
835 			 */
836 			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
837 				mptcp_subflow_fully_established(ctx, &mp_opt);
838 				mptcp_pm_fully_established(owner, child);
839 				ctx->pm_notified = 1;
840 			}
841 		} else if (ctx->mp_join) {
842 			owner = subflow_req->msk;
843 			if (!owner) {
844 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
845 				goto dispose_child;
846 			}
847 
848 			/* move the msk reference ownership to the subflow */
849 			subflow_req->msk = NULL;
850 			ctx->conn = (struct sock *)owner;
851 
852 			if (subflow_use_different_sport(owner, sk)) {
853 				pr_debug("ack inet_sport=%d %d",
854 					 ntohs(inet_sk(sk)->inet_sport),
855 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
856 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
857 					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
858 					goto dispose_child;
859 				}
860 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
861 			}
862 
863 			if (!mptcp_finish_join(child))
864 				goto dispose_child;
865 
866 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
867 			tcp_rsk(req)->drop_req = true;
868 		}
869 	}
870 
871 	/* check for expected invariant - should never trigger, just helps
872 	 * catching earlier subtle bugs
873 	 */
874 	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
875 		     (!mptcp_subflow_ctx(child) ||
876 		      !mptcp_subflow_ctx(child)->conn));
877 	return child;
878 
879 dispose_child:
880 	mptcp_subflow_drop_ctx(child);
881 	tcp_rsk(req)->drop_req = true;
882 	inet_csk_prepare_for_destroy_sock(child);
883 	tcp_done(child);
884 	req->rsk_ops->send_reset(sk, skb);
885 
886 	/* The last child reference will be released by the caller */
887 	return child;
888 
889 fallback:
890 	mptcp_subflow_drop_ctx(child);
891 	return child;
892 }
893 
894 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
895 static struct proto tcp_prot_override __ro_after_init;
896 
897 enum mapping_status {
898 	MAPPING_OK,
899 	MAPPING_INVALID,
900 	MAPPING_EMPTY,
901 	MAPPING_DATA_FIN,
902 	MAPPING_DUMMY,
903 	MAPPING_BAD_CSUM
904 };
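
/* Rough meaning of the states above, as used by get_mapping_status()
 * and subflow_check_data_avail():
 *   MAPPING_OK       - a valid DSS mapping covers the current skb
 *   MAPPING_INVALID  - protocol violation; leads to reset or fallback
 *   MAPPING_EMPTY    - nothing to process yet
 *   MAPPING_DATA_FIN - standalone DATA_FIN, no new data mapping
 *   MAPPING_DUMMY    - subflow fell back to TCP, a fake map is built
 *   MAPPING_BAD_CSUM - the DSS checksum did not verify
 */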
905 
906 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
907 {
908 	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
909 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
910 }
911 
912 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
913 {
914 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
915 	unsigned int skb_consumed;
916 
917 	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
918 	if (WARN_ON_ONCE(skb_consumed >= skb->len))
919 		return true;
920 
921 	return skb->len - skb_consumed <= subflow->map_data_len -
922 					  mptcp_subflow_get_map_offset(subflow);
923 }
924 
925 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
926 {
927 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
928 	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
929 
930 	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
931 		/* Mapping covers data later in the subflow stream,
932 		 * currently unsupported.
933 		 */
934 		dbg_bad_map(subflow, ssn);
935 		return false;
936 	}
937 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
938 				  subflow->map_data_len))) {
939 		/* Mapping only covers already-consumed subflow data, invalid */
940 		dbg_bad_map(subflow, ssn);
941 		return false;
942 	}
943 	return true;
944 }
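
/* Worked example for the checks above: with map_subflow_seq == 1000
 * and map_data_len == 500 the mapping covers relative sequence
 * numbers 1000..1499; ssn < 1000 means the map starts beyond the next
 * byte to read (unsupported), while ssn >= 1500 means the map only
 * covers already-consumed data.
 */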
945 
946 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
947 					      bool csum_reqd)
948 {
949 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
950 	u32 offset, seq, delta;
951 	__sum16 csum;
952 	int len;
953 
954 	if (!csum_reqd)
955 		return MAPPING_OK;
956 
957 	/* mapping already validated on previous traversal */
958 	if (subflow->map_csum_len == subflow->map_data_len)
959 		return MAPPING_OK;
960 
961 	/* traverse the receive queue, ensuring it contains a full
962 	 * DSS mapping and accumulating the related csum.
963 	 * Preserve the accumulated csum across multiple calls, to compute
964 	 * the csum only once
965 	 */
966 	delta = subflow->map_data_len - subflow->map_csum_len;
967 	for (;;) {
968 		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
969 		offset = seq - TCP_SKB_CB(skb)->seq;
970 
971 		/* if the current skb has not been accounted yet, csum its contents
972 		 * up to the amount covered by the current DSS
973 		 */
974 		if (offset < skb->len) {
975 			__wsum csum;
976 
977 			len = min(skb->len - offset, delta);
978 			csum = skb_checksum(skb, offset, len, 0);
979 			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
980 								subflow->map_csum_len);
981 
982 			delta -= len;
983 			subflow->map_csum_len += len;
984 		}
985 		if (delta == 0)
986 			break;
987 
988 		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
989 			/* if this subflow is closed, the partial mapping
990 			 * will never be completed; flush the pending skbs, so
991 			 * that subflow_sched_work_if_closed() can kick in
992 			 */
993 			if (unlikely(ssk->sk_state == TCP_CLOSE))
994 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
995 					sk_eat_skb(ssk, skb);
996 
997 			/* not enough data to validate the csum */
998 			return MAPPING_EMPTY;
999 		}
1000 
1001 		/* the DSS mapping for next skbs will be validated later,
1002 		 * when a later get_mapping_status() call processes such skbs
1003 		 */
1004 		skb = skb->next;
1005 	}
1006 
1007 	/* note that 'map_data_len' accounts only for the carried data and does
1008 	 * not include the eventual seq increment due to the DATA_FIN,
1009 	 * while the pseudo-header requires the original DSS data len,
1010 	 * including it
1011 	 */
1012 	csum = __mptcp_make_csum(subflow->map_seq,
1013 				 subflow->map_subflow_seq,
1014 				 subflow->map_data_len + subflow->map_data_fin,
1015 				 subflow->map_data_csum);
1016 	if (unlikely(csum)) {
1017 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
1018 		return MAPPING_BAD_CSUM;
1019 	}
1020 
1021 	subflow->valid_csum_seen = 1;
1022 	return MAPPING_OK;
1023 }
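
/* The check above follows RFC 8684 section 3.3.1: the DSS checksum
 * covers a pseudo-header (data sequence number, subflow sequence
 * number, data-level length) plus the payload, so __mptcp_make_csum()
 * folds the accumulated payload csum with those three fields; a valid
 * mapping must yield a zero result.
 */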
1024 
1025 static enum mapping_status get_mapping_status(struct sock *ssk,
1026 					      struct mptcp_sock *msk)
1027 {
1028 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1029 	bool csum_reqd = READ_ONCE(msk->csum_enabled);
1030 	struct mptcp_ext *mpext;
1031 	struct sk_buff *skb;
1032 	u16 data_len;
1033 	u64 map_seq;
1034 
1035 	skb = skb_peek(&ssk->sk_receive_queue);
1036 	if (!skb)
1037 		return MAPPING_EMPTY;
1038 
1039 	if (mptcp_check_fallback(ssk))
1040 		return MAPPING_DUMMY;
1041 
1042 	mpext = mptcp_get_ext(skb);
1043 	if (!mpext || !mpext->use_map) {
1044 		if (!subflow->map_valid && !skb->len) {
1045 			/* the TCP stack delivers 0-len FIN pkts to the receive
1046 			 * queue; those are the only 0-len pkts ever expected here,
1047 			 * and a missing mapping is acceptable only for them
1048 			 */
1049 			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1050 				WARN_ONCE(1, "0len seq %d:%d flags %x",
1051 					  TCP_SKB_CB(skb)->seq,
1052 					  TCP_SKB_CB(skb)->end_seq,
1053 					  TCP_SKB_CB(skb)->tcp_flags);
1054 			sk_eat_skb(ssk, skb);
1055 			return MAPPING_EMPTY;
1056 		}
1057 
1058 		if (!subflow->map_valid)
1059 			return MAPPING_INVALID;
1060 
1061 		goto validate_seq;
1062 	}
1063 
1064 	trace_get_mapping_status(mpext);
1065 
1066 	data_len = mpext->data_len;
1067 	if (data_len == 0) {
1068 		pr_debug("infinite mapping received");
1069 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1070 		subflow->map_data_len = 0;
1071 		return MAPPING_INVALID;
1072 	}
1073 
1074 	if (mpext->data_fin == 1) {
1075 		if (data_len == 1) {
1076 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1077 								 mpext->dsn64);
1078 			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1079 			if (subflow->map_valid) {
1080 				/* A DATA_FIN might arrive in a DSS
1081 				 * option before the previous mapping
1082 				 * has been fully consumed. Continue
1083 				 * handling the existing mapping.
1084 				 */
1085 				skb_ext_del(skb, SKB_EXT_MPTCP);
1086 				return MAPPING_OK;
1087 			} else {
1088 				if (updated)
1089 					mptcp_schedule_work((struct sock *)msk);
1090 
1091 				return MAPPING_DATA_FIN;
1092 			}
1093 		} else {
1094 			u64 data_fin_seq = mpext->data_seq + data_len - 1;
1095 
1096 			/* If mpext->data_seq is a 32-bit value, data_fin_seq
1097 			 * must also be limited to 32 bits.
1098 			 */
1099 			if (!mpext->dsn64)
1100 				data_fin_seq &= GENMASK_ULL(31, 0);
1101 
1102 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1103 			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1104 				 data_fin_seq, mpext->dsn64);
1105 		}
1106 
1107 		/* Adjust for DATA_FIN using 1 byte of sequence space */
1108 		data_len--;
1109 	}
1110 
1111 	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1112 	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1113 
1114 	if (subflow->map_valid) {
1115 		/* Allow replacing only with an identical map */
1116 		if (subflow->map_seq == map_seq &&
1117 		    subflow->map_subflow_seq == mpext->subflow_seq &&
1118 		    subflow->map_data_len == data_len &&
1119 		    subflow->map_csum_reqd == mpext->csum_reqd) {
1120 			skb_ext_del(skb, SKB_EXT_MPTCP);
1121 			goto validate_csum;
1122 		}
1123 
1124 		/* If this skb's data is fully covered by the current mapping,
1125 		 * the new map would need caching, which is not supported
1126 		 */
1127 		if (skb_is_fully_mapped(ssk, skb)) {
1128 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1129 			return MAPPING_INVALID;
1130 		}
1131 
1132 		/* will validate the next map after consuming the current one */
1133 		goto validate_csum;
1134 	}
1135 
1136 	subflow->map_seq = map_seq;
1137 	subflow->map_subflow_seq = mpext->subflow_seq;
1138 	subflow->map_data_len = data_len;
1139 	subflow->map_valid = 1;
1140 	subflow->map_data_fin = mpext->data_fin;
1141 	subflow->mpc_map = mpext->mpc_map;
1142 	subflow->map_csum_reqd = mpext->csum_reqd;
1143 	subflow->map_csum_len = 0;
1144 	subflow->map_data_csum = csum_unfold(mpext->csum);
1145 
1146 	/* Cf. RFC 8684 Section 3.3.0 */
1147 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
1148 		return MAPPING_INVALID;
1149 
1150 	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1151 		 subflow->map_seq, subflow->map_subflow_seq,
1152 		 subflow->map_data_len, subflow->map_csum_reqd,
1153 		 subflow->map_data_csum);
1154 
1155 validate_seq:
1156 	/* we revalidate the valid mapping on each new skb, because we must ensure
1157 	 * the current skb is completely covered by the available mapping
1158 	 */
1159 	if (!validate_mapping(ssk, skb)) {
1160 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1161 		return MAPPING_INVALID;
1162 	}
1163 
1164 	skb_ext_del(skb, SKB_EXT_MPTCP);
1165 
1166 validate_csum:
1167 	return validate_data_csum(ssk, skb, csum_reqd);
1168 }
1169 
1170 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1171 				       u64 limit)
1172 {
1173 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1174 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1175 	u32 incr;
1176 
1177 	incr = limit >= skb->len ? skb->len + fin : limit;
1178 
1179 	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
1180 		 subflow->map_subflow_seq);
1181 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1182 	tcp_sk(ssk)->copied_seq += incr;
1183 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1184 		sk_eat_skb(ssk, skb);
1185 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1186 		subflow->map_valid = 0;
1187 }
1188 
1189 /* sched mptcp worker to remove the subflow if no more data is pending */
1190 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1191 {
1192 	if (likely(ssk->sk_state != TCP_CLOSE))
1193 		return;
1194 
1195 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
1196 	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1197 		mptcp_schedule_work((struct sock *)msk);
1198 }
1199 
1200 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
1201 {
1202 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1203 
1204 	if (subflow->mp_join)
1205 		return false;
1206 	else if (READ_ONCE(msk->csum_enabled))
1207 		return !subflow->valid_csum_seen;
1208 	else
1209 		return !subflow->fully_established;
1210 }
1211 
1212 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1213 {
1214 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1215 	unsigned long fail_tout;
1216 
1217 	/* graceful failure can happen only on the MPC subflow */
1218 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1219 		return;
1220 
1221 	/* since the close timeout takes precedence over the fail one,
1222 	 * no need to start the latter when the former is already set
1223 	 */
1224 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
1225 		return;
1226 
1227 	/* we don't need extreme accuracy here; a zero fail_tout is a special
1228 	 * value meaning no fail timeout at all
1229 	 */
1230 	fail_tout = jiffies + TCP_RTO_MAX;
1231 	if (!fail_tout)
1232 		fail_tout = 1;
1233 	WRITE_ONCE(subflow->fail_tout, fail_tout);
1234 	tcp_send_ack(ssk);
1235 
1236 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
1237 }
1238 
1239 static bool subflow_check_data_avail(struct sock *ssk)
1240 {
1241 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1242 	enum mapping_status status;
1243 	struct mptcp_sock *msk;
1244 	struct sk_buff *skb;
1245 
1246 	if (!skb_peek(&ssk->sk_receive_queue))
1247 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1248 	if (subflow->data_avail)
1249 		return true;
1250 
1251 	msk = mptcp_sk(subflow->conn);
1252 	for (;;) {
1253 		u64 ack_seq;
1254 		u64 old_ack;
1255 
1256 		status = get_mapping_status(ssk, msk);
1257 		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1258 		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
1259 			     status == MAPPING_BAD_CSUM))
1260 			goto fallback;
1261 
1262 		if (status != MAPPING_OK)
1263 			goto no_data;
1264 
1265 		skb = skb_peek(&ssk->sk_receive_queue);
1266 		if (WARN_ON_ONCE(!skb))
1267 			goto no_data;
1268 
1269 		if (unlikely(!READ_ONCE(msk->can_ack)))
1270 			goto fallback;
1271 
1272 		old_ack = READ_ONCE(msk->ack_seq);
1273 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1274 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1275 			 ack_seq);
1276 		if (unlikely(before64(ack_seq, old_ack))) {
1277 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1278 			continue;
1279 		}
1280 
1281 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1282 		break;
1283 	}
1284 	return true;
1285 
1286 no_data:
1287 	subflow_sched_work_if_closed(msk, ssk);
1288 	return false;
1289 
1290 fallback:
1291 	if (!__mptcp_check_fallback(msk)) {
1292 		/* RFC 8684 section 3.7. */
1293 		if (status == MAPPING_BAD_CSUM &&
1294 		    (subflow->mp_join || subflow->valid_csum_seen)) {
1295 			subflow->send_mp_fail = 1;
1296 
1297 			if (!READ_ONCE(msk->allow_infinite_fallback)) {
1298 				subflow->reset_transient = 0;
1299 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1300 				goto reset;
1301 			}
1302 			mptcp_subflow_fail(msk, ssk);
1303 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1304 			return true;
1305 		}
1306 
1307 		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
1308 			/* fatal protocol error, close the socket.
1309 			 * subflow_error_report() will introduce the appropriate barriers
1310 			 */
1311 			subflow->reset_transient = 0;
1312 			subflow->reset_reason = MPTCP_RST_EMPTCP;
1313 
1314 reset:
1315 			WRITE_ONCE(ssk->sk_err, EBADMSG);
1316 			tcp_set_state(ssk, TCP_CLOSE);
1317 			while ((skb = skb_peek(&ssk->sk_receive_queue)))
1318 				sk_eat_skb(ssk, skb);
1319 			tcp_send_active_reset(ssk, GFP_ATOMIC);
1320 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1321 			return false;
1322 		}
1323 
1324 		mptcp_do_fallback(ssk);
1325 	}
1326 
1327 	skb = skb_peek(&ssk->sk_receive_queue);
1328 	subflow->map_valid = 1;
1329 	subflow->map_seq = READ_ONCE(msk->ack_seq);
1330 	subflow->map_data_len = skb->len;
1331 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1332 	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1333 	return true;
1334 }
1335 
1336 bool mptcp_subflow_data_available(struct sock *sk)
1337 {
1338 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1339 
1340 	/* check if current mapping is still valid */
1341 	if (subflow->map_valid &&
1342 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1343 		subflow->map_valid = 0;
1344 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1345 
1346 		pr_debug("Done with mapping: seq=%u data_len=%u",
1347 			 subflow->map_subflow_seq,
1348 			 subflow->map_data_len);
1349 	}
1350 
1351 	return subflow_check_data_avail(sk);
1352 }
1353 
1354 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1355  * not the ssk one.
1356  *
1357  * In mptcp, rwin is about the mptcp-level connection data.
1358  *
1359  * Data that is still on the ssk rx queue can thus be ignored,
1360  * as far as the mptcp peer is concerned, that data is still in flight.
1361  * DSS ACK is updated when skb is moved to the mptcp rx queue.
1362  */
1363 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1364 {
1365 	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1366 	const struct sock *sk = subflow->conn;
1367 
1368 	*space = __mptcp_space(sk);
1369 	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1370 }
1371 
1372 static void subflow_error_report(struct sock *ssk)
1373 {
1374 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1375 
1376 	/* bail early if this is a no-op, so that we avoid introducing a
1377 	 * problematic lockdep dependency between TCP accept queue lock
1378 	 * and msk socket spinlock
1379 	 */
1380 	if (!sk->sk_socket)
1381 		return;
1382 
1383 	mptcp_data_lock(sk);
1384 	if (!sock_owned_by_user(sk))
1385 		__mptcp_error_report(sk);
1386 	else
1387 		__set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->cb_flags);
1388 	mptcp_data_unlock(sk);
1389 }
1390 
1391 static void subflow_data_ready(struct sock *sk)
1392 {
1393 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1394 	u16 state = 1 << inet_sk_state_load(sk);
1395 	struct sock *parent = subflow->conn;
1396 	struct mptcp_sock *msk;
1397 
1398 	trace_sk_data_ready(sk);
1399 
1400 	msk = mptcp_sk(parent);
1401 	if (state & TCPF_LISTEN) {
1402 		/* MPJ subflows are removed from the accept queue before reaching here,
1403 		 * avoid stray wakeups
1404 		 */
1405 		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1406 			return;
1407 
1408 		parent->sk_data_ready(parent);
1409 		return;
1410 	}
1411 
1412 	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1413 		     !subflow->mp_join && !(state & TCPF_CLOSE));
1414 
1415 	if (mptcp_subflow_data_available(sk))
1416 		mptcp_data_ready(parent, sk);
1417 	else if (unlikely(sk->sk_err))
1418 		subflow_error_report(sk);
1419 }
1420 
1421 static void subflow_write_space(struct sock *ssk)
1422 {
1423 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1424 
1425 	mptcp_propagate_sndbuf(sk, ssk);
1426 	mptcp_write_space(sk);
1427 }
1428 
1429 static const struct inet_connection_sock_af_ops *
1430 subflow_default_af_ops(struct sock *sk)
1431 {
1432 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1433 	if (sk->sk_family == AF_INET6)
1434 		return &subflow_v6_specific;
1435 #endif
1436 	return &subflow_specific;
1437 }
1438 
1439 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1440 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1441 {
1442 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1443 	struct inet_connection_sock *icsk = inet_csk(sk);
1444 	const struct inet_connection_sock_af_ops *target;
1445 
1446 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1447 
1448 	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1449 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1450 
1451 	if (likely(icsk->icsk_af_ops == target))
1452 		return;
1453 
1454 	subflow->icsk_af_ops = icsk->icsk_af_ops;
1455 	icsk->icsk_af_ops = target;
1456 }
1457 #endif
1458 
1459 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1460 			 struct sockaddr_storage *addr,
1461 			 unsigned short family)
1462 {
1463 	memset(addr, 0, sizeof(*addr));
1464 	addr->ss_family = family;
1465 	if (addr->ss_family == AF_INET) {
1466 		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1467 
1468 		if (info->family == AF_INET)
1469 			in_addr->sin_addr = info->addr;
1470 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1471 		else if (ipv6_addr_v4mapped(&info->addr6))
1472 			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1473 #endif
1474 		in_addr->sin_port = info->port;
1475 	}
1476 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1477 	else if (addr->ss_family == AF_INET6) {
1478 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1479 
1480 		if (info->family == AF_INET)
1481 			ipv6_addr_set_v4mapped(info->addr.s_addr,
1482 					       &in6_addr->sin6_addr);
1483 		else
1484 			in6_addr->sin6_addr = info->addr6;
1485 		in6_addr->sin6_port = info->port;
1486 	}
1487 #endif
1488 }
1489 
1490 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1491 			    const struct mptcp_addr_info *remote)
1492 {
1493 	struct mptcp_sock *msk = mptcp_sk(sk);
1494 	struct mptcp_subflow_context *subflow;
1495 	struct sockaddr_storage addr;
1496 	int remote_id = remote->id;
1497 	int local_id = loc->id;
1498 	int err = -ENOTCONN;
1499 	struct socket *sf;
1500 	struct sock *ssk;
1501 	u32 remote_token;
1502 	int addrlen;
1503 	int ifindex;
1504 	u8 flags;
1505 
1506 	if (!mptcp_is_fully_established(sk))
1507 		goto err_out;
1508 
1509 	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1510 	if (err)
1511 		goto err_out;
1512 
1513 	ssk = sf->sk;
1514 	subflow = mptcp_subflow_ctx(ssk);
1515 	do {
1516 		get_random_bytes(&subflow->local_nonce, sizeof(u32));
1517 	} while (!subflow->local_nonce);
1518 
1519 	if (local_id)
1520 		subflow_set_local_id(subflow, local_id);
1521 
1522 	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1523 					     &flags, &ifindex);
1524 	subflow->remote_key_valid = 1;
1525 	subflow->remote_key = msk->remote_key;
1526 	subflow->local_key = msk->local_key;
1527 	subflow->token = msk->token;
1528 	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1529 
1530 	addrlen = sizeof(struct sockaddr_in);
1531 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1532 	if (addr.ss_family == AF_INET6)
1533 		addrlen = sizeof(struct sockaddr_in6);
1534 #endif
1535 	mptcp_sockopt_sync(msk, ssk);
1536 
1537 	ssk->sk_bound_dev_if = ifindex;
1538 	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1539 	if (err)
1540 		goto failed;
1541 
1542 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1543 	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1544 		 remote_token, local_id, remote_id);
1545 	subflow->remote_token = remote_token;
1546 	subflow->remote_id = remote_id;
1547 	subflow->request_join = 1;
1548 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1549 	subflow->subflow_id = msk->subflow_id++;
1550 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1551 
1552 	sock_hold(ssk);
1553 	list_add_tail(&subflow->node, &msk->conn_list);
1554 	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1555 	if (err && err != -EINPROGRESS)
1556 		goto failed_unlink;
1557 
1558 	/* discard the subflow socket */
1559 	mptcp_sock_graft(ssk, sk->sk_socket);
1560 	iput(SOCK_INODE(sf));
1561 	WRITE_ONCE(msk->allow_infinite_fallback, false);
1562 	mptcp_stop_tout_timer(sk);
1563 	return 0;
1564 
1565 failed_unlink:
1566 	list_del(&subflow->node);
1567 	sock_put(mptcp_subflow_tcp_sock(subflow));
1568 
1569 failed:
1570 	subflow->disposable = 1;
1571 	sock_release(sf);
1572 
1573 err_out:
1574 	/* we account subflows before the creation, and these failures will not
1575 	 * be caught by sk_state_change()
1576 	 */
1577 	mptcp_pm_close_subflow(msk);
1578 	return err;
1579 }
1580 
1581 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1582 {
1583 #ifdef CONFIG_SOCK_CGROUP_DATA
1584 	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1585 				*child_skcd = &child->sk_cgrp_data;
1586 
1587 	/* only the additional subflows created by kworkers have to be modified */
1588 	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1589 	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
1590 #ifdef CONFIG_MEMCG
1591 		struct mem_cgroup *memcg = parent->sk_memcg;
1592 
1593 		mem_cgroup_sk_free(child);
1594 		if (memcg && css_tryget(&memcg->css))
1595 			child->sk_memcg = memcg;
1596 #endif /* CONFIG_MEMCG */
1597 
1598 		cgroup_sk_free(child_skcd);
1599 		*child_skcd = *parent_skcd;
1600 		cgroup_sk_clone(child_skcd);
1601 	}
1602 #endif /* CONFIG_SOCK_CGROUP_DATA */
1603 }
1604 
1605 static void mptcp_subflow_ops_override(struct sock *ssk)
1606 {
1607 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1608 	if (ssk->sk_prot == &tcpv6_prot)
1609 		ssk->sk_prot = &tcpv6_prot_override;
1610 	else
1611 #endif
1612 		ssk->sk_prot = &tcp_prot_override;
1613 }
1614 
1615 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1616 {
1617 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1618 	if (ssk->sk_prot == &tcpv6_prot_override)
1619 		ssk->sk_prot = &tcpv6_prot;
1620 	else
1621 #endif
1622 		ssk->sk_prot = &tcp_prot;
1623 }
1624 
1625 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1626 				struct socket **new_sock)
1627 {
1628 	struct mptcp_subflow_context *subflow;
1629 	struct net *net = sock_net(sk);
1630 	struct socket *sf;
1631 	int err;
1632 
1633 	/* unaccepted server sockets can reach here; on bad configuration,
1634 	 * bail early to avoid greater trouble later
1635 	 */
1636 	if (unlikely(!sk->sk_socket))
1637 		return -EINVAL;
1638 
1639 	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1640 	if (err)
1641 		return err;
1642 
1643 	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1644 
1645 	err = security_mptcp_add_subflow(sk, sf->sk);
1646 	if (err)
1647 		goto release_ssk;
1648 
1649 	/* the newly created socket has to be in the same cgroup as its parent */
1650 	mptcp_attach_cgroup(sk, sf->sk);
1651 
1652 	/* kernel sockets do not by default acquire a net ref, but the TCP timer
1653 	 * needs it.
1654 	 * Update ns_tracker to current stack trace and refcounted tracker.
1655 	 */
1656 	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
1657 	sf->sk->sk_net_refcnt = 1;
1658 	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1659 	sock_inuse_add(net, 1);
1660 	err = tcp_set_ulp(sf->sk, "mptcp");
1661 
1662 release_ssk:
1663 	release_sock(sf->sk);
1664 
1665 	if (err) {
1666 		sock_release(sf);
1667 		return err;
1668 	}
1669 
1670 	/* the newly created socket really belongs to the owning MPTCP master
1671 	 * socket, even if for additional subflows the allocation is performed
1672 	 * by a kernel workqueue. Adjust inode references, so that the
1673 	 * procfs/diag interfaces really show this one belonging to the correct
1674 	 * user.
1675 	 */
1676 	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1677 	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1678 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1679 
1680 	subflow = mptcp_subflow_ctx(sf->sk);
1681 	pr_debug("subflow=%p", subflow);
1682 
1683 	*new_sock = sf;
1684 	sock_hold(sk);
1685 	subflow->conn = sk;
1686 	mptcp_subflow_ops_override(sf->sk);
1687 
1688 	return 0;
1689 }
1690 
1691 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1692 							gfp_t priority)
1693 {
1694 	struct inet_connection_sock *icsk = inet_csk(sk);
1695 	struct mptcp_subflow_context *ctx;
1696 
1697 	ctx = kzalloc(sizeof(*ctx), priority);
1698 	if (!ctx)
1699 		return NULL;
1700 
1701 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1702 	INIT_LIST_HEAD(&ctx->node);
1703 	INIT_LIST_HEAD(&ctx->delegated_node);
1704 
1705 	pr_debug("subflow=%p", ctx);
1706 
1707 	ctx->tcp_sock = sk;
1708 
1709 	return ctx;
1710 }
1711 
1712 static void __subflow_state_change(struct sock *sk)
1713 {
1714 	struct socket_wq *wq;
1715 
1716 	rcu_read_lock();
1717 	wq = rcu_dereference(sk->sk_wq);
1718 	if (skwq_has_sleeper(wq))
1719 		wake_up_interruptible_all(&wq->wait);
1720 	rcu_read_unlock();
1721 }
1722 
1723 static bool subflow_is_done(const struct sock *sk)
1724 {
1725 	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1726 }
1727 
1728 static void subflow_state_change(struct sock *sk)
1729 {
1730 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1731 	struct sock *parent = subflow->conn;
1732 	struct mptcp_sock *msk;
1733 
1734 	__subflow_state_change(sk);
1735 
1736 	msk = mptcp_sk(parent);
1737 	if (subflow_simultaneous_connect(sk)) {
1738 		mptcp_do_fallback(sk);
1739 		mptcp_rcv_space_init(msk, sk);
1740 		pr_fallback(msk);
1741 		subflow->conn_finished = 1;
1742 		mptcp_propagate_state(parent, sk);
1743 	}
1744 
1745 	/* as recvmsg() does not acquire the subflow socket for ssk selection,
1746 	 * a FIN packet carrying a DSS can go unnoticed if we don't trigger
1747 	 * the data available machinery here.
1748 	 */
1749 	if (mptcp_subflow_data_available(sk))
1750 		mptcp_data_ready(parent, sk);
1751 	else if (unlikely(sk->sk_err))
1752 		subflow_error_report(sk);
1753 
1754 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1755 
1756 	/* when the fallback subflow closes the rx side, trigger a 'dummy'
1757 	 * ingress data fin, so that the msk state will follow along
1758 	 */
1759 	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1760 	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1761 		mptcp_schedule_work(parent);
1762 }
1763 
1764 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
1765 {
1766 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
1767 	struct request_sock *req, *head, *tail;
1768 	struct mptcp_subflow_context *subflow;
1769 	struct sock *sk, *ssk;
1770 
1771 	/* Due to lock dependencies, no relevant lock can be acquired under rskq_lock.
1772 	 * Splice the req list, so that accept() cannot reach the pending ssk after
1773 	 * the listener socket is released below.
1774 	 */
1775 	spin_lock_bh(&queue->rskq_lock);
1776 	head = queue->rskq_accept_head;
1777 	tail = queue->rskq_accept_tail;
1778 	queue->rskq_accept_head = NULL;
1779 	queue->rskq_accept_tail = NULL;
1780 	spin_unlock_bh(&queue->rskq_lock);
1781 
1782 	if (!head)
1783 		return;
1784 
1785 	/* we can't acquire the msk socket lock under the subflow one,
1786 	 * or it would cause an ABBA deadlock
1787 	 */
1788 	release_sock(listener_ssk);
1789 
1790 	for (req = head; req; req = req->dl_next) {
1791 		ssk = req->sk;
1792 		if (!sk_is_mptcp(ssk))
1793 			continue;
1794 
1795 		subflow = mptcp_subflow_ctx(ssk);
1796 		if (!subflow || !subflow->conn)
1797 			continue;
1798 
1799 		sk = subflow->conn;
1800 		sock_hold(sk);
1801 
1802 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1803 		__mptcp_unaccepted_force_close(sk);
1804 		release_sock(sk);
1805 
1806 		/* lockdep will report a false positive ABBA deadlock
1807 		 * between cancel_work_sync() and the listener socket.
1808 		 * The involved locks belong to different sockets WRT
1809 		 * the existing AB chain.
1810 		 * Using a per-socket key is problematic as key
1811 		 * deregistration requires process context, while it
1812 		 * would have to happen at socket disposal time, in
1813 		 * atomic context.
1814 		 * Just tell lockdep to consider the listener socket
1815 		 * released here.
1816 		 */
1817 		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
1818 		mptcp_cancel_work(sk);
1819 		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1820 
1821 		sock_put(sk);
1822 	}
1823 
1824 	/* we are still under the listener msk socket lock */
1825 	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
1826 
1827 	/* restore the listener queue, to let the TCP code clean it up */
1828 	spin_lock_bh(&queue->rskq_lock);
1829 	WARN_ON_ONCE(queue->rskq_accept_head);
1830 	queue->rskq_accept_head = head;
1831 	queue->rskq_accept_tail = tail;
1832 	spin_unlock_bh(&queue->rskq_lock);
1833 }
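/* The function above follows a detach/process/restore scheme. A generic
 * sketch of the same pattern with a hypothetical spinlock-protected
 * list_head queue (the accept queue itself is a singly-linked req list,
 * hence the head/tail pointer juggling instead):
 */
static void example_splice_process_restore(spinlock_t *lock,
					   struct list_head *queue)
{
	LIST_HEAD(work);

	spin_lock_bh(lock);
	list_splice_init(queue, &work);		/* detach under the lock */
	spin_unlock_bh(lock);

	/* ... process 'work' entries without holding 'lock' ... */

	spin_lock_bh(lock);
	list_splice(&work, queue);		/* hand the entries back */
	spin_unlock_bh(lock);
}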
1834 
1835 static int subflow_ulp_init(struct sock *sk)
1836 {
1837 	struct inet_connection_sock *icsk = inet_csk(sk);
1838 	struct mptcp_subflow_context *ctx;
1839 	struct tcp_sock *tp = tcp_sk(sk);
1840 	int err = 0;
1841 
1842 	/* disallow attaching ULP to a socket unless it has been
1843 	 * created with sock_create_kern()
1844 	 */
1845 	if (!sk->sk_kern_sock) {
1846 		err = -EOPNOTSUPP;
1847 		goto out;
1848 	}
1849 
1850 	ctx = subflow_create_ctx(sk, GFP_KERNEL);
1851 	if (!ctx) {
1852 		err = -ENOMEM;
1853 		goto out;
1854 	}
1855 
1856 	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1857 
1858 	tp->is_mptcp = 1;
1859 	ctx->icsk_af_ops = icsk->icsk_af_ops;
1860 	icsk->icsk_af_ops = subflow_default_af_ops(sk);
1861 	ctx->tcp_state_change = sk->sk_state_change;
1862 	ctx->tcp_error_report = sk->sk_error_report;
1863 
1864 	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1865 	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1866 
1867 	sk->sk_data_ready = subflow_data_ready;
1868 	sk->sk_write_space = subflow_write_space;
1869 	sk->sk_state_change = subflow_state_change;
1870 	sk->sk_error_report = subflow_error_report;
1871 out:
1872 	return err;
1873 }
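/* The sk_kern_sock check above means this ULP cannot be attached from
 * userspace; a setsockopt(TCP_ULP) attempt such as the sketch below
 * (userspace code, shown inside the comment for illustration) fails with
 * EOPNOTSUPP:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "mptcp", sizeof("mptcp"));
 *	// -> -1, errno == EOPNOTSUPP
 */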
1874 
1875 static void subflow_ulp_release(struct sock *ssk)
1876 {
1877 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1878 	bool release = true;
1879 	struct sock *sk;
1880 
1881 	if (!ctx)
1882 		return;
1883 
1884 	sk = ctx->conn;
1885 	if (sk) {
1886 		/* if the msk has been orphaned while the subflow is
1887 		 * still unaccepted, keep the ctx alive; it will be
1888 		 * freed by __mptcp_close_ssk()
1889 		 */
1890 		release = ctx->disposable || list_empty(&ctx->node);
1891 
1892 		/* inet_child_forget() does not call sk_state_change(),
1893 		 * so explicitly trigger the socket close machinery here
1894 		 */
1895 		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
1896 						  &mptcp_sk(sk)->flags))
1897 			mptcp_schedule_work(sk);
1898 		sock_put(sk);
1899 	}
1900 
1901 	mptcp_subflow_ops_undo_override(ssk);
1902 	if (release)
1903 		kfree_rcu(ctx, rcu);
1904 }
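/* kfree_rcu() above defers freeing the ctx past an RCU grace period, so a
 * lockless reader may dereference icsk_ulp_data inside a read-side
 * critical section without racing the release path. Illustrative reader;
 * the helper name and the checked condition are assumptions:
 */
static bool example_ctx_has_conn(const struct sock *sk)
{
	const struct mptcp_subflow_context *ctx;
	bool ret;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	ret = ctx && READ_ONCE(ctx->conn);
	rcu_read_unlock();

	return ret;
}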
1905 
1906 static void subflow_ulp_clone(const struct request_sock *req,
1907 			      struct sock *newsk,
1908 			      const gfp_t priority)
1909 {
1910 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1911 	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1912 	struct mptcp_subflow_context *new_ctx;
1913 
1914 	if (!tcp_rsk(req)->is_mptcp ||
1915 	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1916 		subflow_ulp_fallback(newsk, old_ctx);
1917 		return;
1918 	}
1919 
1920 	new_ctx = subflow_create_ctx(newsk, priority);
1921 	if (!new_ctx) {
1922 		subflow_ulp_fallback(newsk, old_ctx);
1923 		return;
1924 	}
1925 
1926 	new_ctx->conn_finished = 1;
1927 	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1928 	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1929 	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1930 	new_ctx->rel_write_seq = 1;
1931 	new_ctx->tcp_sock = newsk;
1932 
1933 	if (subflow_req->mp_capable) {
1934 		/* see comments in subflow_syn_recv_sock(): the MPTCP connection
1935 		 * is fully established only after we receive the remote key
1936 		 */
1937 		new_ctx->mp_capable = 1;
1938 		new_ctx->local_key = subflow_req->local_key;
1939 		new_ctx->token = subflow_req->token;
1940 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1941 		new_ctx->idsn = subflow_req->idsn;
1942 
1943 		/* this is the first subflow, id is always 0 */
1944 		new_ctx->local_id_valid = 1;
1945 	} else if (subflow_req->mp_join) {
1946 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1947 		new_ctx->mp_join = 1;
1948 		new_ctx->fully_established = 1;
1949 		new_ctx->remote_key_valid = 1;
1950 		new_ctx->backup = subflow_req->backup;
1951 		new_ctx->remote_id = subflow_req->remote_id;
1952 		new_ctx->token = subflow_req->token;
1953 		new_ctx->thmac = subflow_req->thmac;
1954 
1955 		/* the subflow req local_id is valid; it was fetched via
1956 		 * subflow_check_req() and subflow_token_join_request()
1957 		 */
1958 		subflow_set_local_id(new_ctx, subflow_req->local_id);
1959 	}
1960 }
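/* To summarize the request -> context handoff above: an MP_CAPABLE clone
 * inherits local_key/token/ssn_offset/idsn and is not yet fully
 * established (the remote key is still missing), while an MP_JOIN clone
 * is fully established at clone time and additionally inherits thmac,
 * remote_id and the backup flag validated during the join handshake.
 */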
1961 
1962 static void tcp_release_cb_override(struct sock *ssk)
1963 {
1964 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1965 	long status;
1966 
1967 	/* process and clear all the pending actions, but leave the subflow in
1968 	 * the napi queue. To respect locking, only the same CPU that originated
1969 	 * the action can touch the list; mptcp_napi_poll() will take care of it.
1970 	 */
1971 	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
1972 	if (status)
1973 		mptcp_subflow_process_delegated(ssk, status);
1974 
1975 	tcp_release_cb(ssk);
1976 }
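/* set_mask_bits() above atomically claims and clears the delegated-action
 * bits in a single step. An equivalent open-coded cmpxchg loop, a sketch
 * for illustration only (the real macro lives in linux/bitops.h):
 */
static unsigned long example_claim_delegated_actions(unsigned long *status_p)
{
	unsigned long old = READ_ONCE(*status_p);
	unsigned long new;

	do {
		new = old & ~MPTCP_DELEGATE_ACTIONS_MASK;
	} while (!try_cmpxchg(status_p, &old, new));

	/* return only the action bits we just claimed */
	return old & MPTCP_DELEGATE_ACTIONS_MASK;
}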
1977 
1978 static int tcp_abort_override(struct sock *ssk, int err)
1979 {
1980 	/* closing a listener subflow requires a great deal of care;
1981 	 * keep it simple and just prevent such an operation
1982 	 */
1983 	if (inet_sk_state_load(ssk) == TCP_LISTEN)
1984 		return -EINVAL;
1985 
1986 	return tcp_abort(ssk, err);
1987 }
1988 
1989 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
1990 	.name		= "mptcp",
1991 	.owner		= THIS_MODULE,
1992 	.init		= subflow_ulp_init,
1993 	.release	= subflow_ulp_release,
1994 	.clone		= subflow_ulp_clone,
1995 };
1996 
1997 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
1998 {
1999 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
2000 
2001 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
2002 					      subflow_ops->obj_size, 0,
2003 					      SLAB_ACCOUNT |
2004 					      SLAB_TYPESAFE_BY_RCU,
2005 					      NULL);
2006 	if (!subflow_ops->slab)
2007 		return -ENOMEM;
2008 
2009 	return 0;
2010 }
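/* SLAB_TYPESAFE_BY_RCU above lets a freed request sock be recycled (as
 * the same type) before an RCU grace period ends, so lockless lookups
 * must take a reference and re-validate identity after dereferencing.
 * Sketch of the usual first half of that pattern; the helper name is
 * illustrative:
 */
static struct request_sock *example_req_tryget(struct request_sock *req)
{
	/* caller is inside rcu_read_lock() and found 'req' locklessly */
	if (!refcount_inc_not_zero(&req->rsk_refcnt))
		return NULL;	/* already freed, possibly recycled */

	return req;
}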
2011 
2012 void __init mptcp_subflow_init(void)
2013 {
2014 	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
2015 	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
2016 	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
2017 
2018 	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
2019 		panic("MPTCP: failed to init subflow v4 request sock ops\n");
2020 
2021 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
2022 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
2023 	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
2024 
2025 	subflow_specific = ipv4_specific;
2026 	subflow_specific.conn_request = subflow_v4_conn_request;
2027 	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
2028 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
2029 	subflow_specific.rebuild_header = subflow_rebuild_header;
2030 
2031 	tcp_prot_override = tcp_prot;
2032 	tcp_prot_override.release_cb = tcp_release_cb_override;
2033 	tcp_prot_override.diag_destroy = tcp_abort_override;
2034 
2035 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2036 	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
2037 	 * structures for v4 and v6 have the same size. This should not change
2038 	 * in the future, but better make sure we are warned if it is no longer
2039 	 * the case.
2040 	 */
2041 	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
2042 
2043 	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
2044 	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
2045 	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
2046 
2047 	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
2048 		panic("MPTCP: failed to init subflow v6 request sock ops\n");
2049 
2050 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
2051 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
2052 	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
2053 
2054 	subflow_v6_specific = ipv6_specific;
2055 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
2056 	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
2057 	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
2058 	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
2059 
2060 	subflow_v6m_specific = subflow_v6_specific;
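	/* v4-mapped-v6 sockets put IPv4 on the wire: reuse the IPv4
	 * transmit, checksum and MTU handlers below, while keeping the
	 * IPv6 socket glue inherited from subflow_v6_specific
	 */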
2061 	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
2062 	subflow_v6m_specific.send_check = ipv4_specific.send_check;
2063 	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
2064 	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
2065 	subflow_v6m_specific.net_frag_header_len = 0;
2066 	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
2067 
2068 	tcpv6_prot_override = tcpv6_prot;
2069 	tcpv6_prot_override.release_cb = tcp_release_cb_override;
2070 	tcpv6_prot_override.diag_destroy = tcp_abort_override;
2071 #endif
2072 
2073 	mptcp_diag_subflow_init(&subflow_ulp_ops);
2074 
2075 	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
2076 		panic("MPTCP: failed to register subflows to ULP\n");
2077 }
2078