// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

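/* release the request socket MPTCP state: drop the msk reference taken at
 * join-request time, free the token and chain up to the TCP destructor
 */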
static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

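/* HMAC the two nonces with the given key pair; used both for the truncated
 * hmac carried by the SYN-ACK and for the full hmac of the third ACK in the
 * MP_JOIN handshake
 */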
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

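/* parse the MPTCP options carried by the SYN and initialize the request
 * socket accordingly: generate a new token for MP_CAPABLE, or validate the
 * peer token and compute the truncated hmac for MP_JOIN
 */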
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);
		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

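/* process the SYN-ACK of an actively opened subflow: complete the
 * MP_CAPABLE handshake or validate the MP_JOIN truncated hmac, falling
 * back to plain TCP or resetting the subflow on failure
 */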
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* make sure we take no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	tcp_send_active_reset(sk, GFP_ATOMIC);
	tcp_done(sk);
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is freed
	 * from the tcp listener socket's request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * SYN_RECV state and doesn't have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_SYN_RECV) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_token_destroy(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

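/* create the child socket for an incoming connection: allocate a new msk
 * for MP_CAPABLE requests, attach MP_JOIN children to the owning msk, and
 * fall back to plain TCP when the MPTCP prerequisites are not met
 */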
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk) ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fall back on ctx allocation failure and on the
		 * prerequisite checks above. In the latter scenario we
		 * additionally need to reset the context to non-MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just helps
	 * catching subtle bugs earlier
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

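/* expand the 32-bit DSS sequence number carried by the DSS option to
 * 64 bits, using the previous mapping to infer the high word
 */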
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

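/* validate the DSS mapping carried by the skb at the head of the receive
 * queue and update the subflow mapping state; returns one of the
 * mapping_status values above
 */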
static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN pkts to the
			 * receive queue; those are the only 0-len pkts ever
			 * expected here, and we can admit no mapping only
			 * for 0-len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb's data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate the valid mapping on each new skb, because we must
	 * ensure the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

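/* map the data at the head of the receive queue and check whether it is in
 * sequence at the MPTCP level, discarding any mapped data that does not
 * match the current msk ack_seq
 */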
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mappings. Old values are spurious
		 * retransmissions; we can hit "future" values on an active
		 * backup subflow switch, and we rely on retransmissions to
		 * get in-sequence data.
		 * Concurrent subflow support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		       before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored:
 * as far as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

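/* subflow data-ready callback: wake the msk for incoming connection
 * requests on listener subflows, otherwise propagate the event only when
 * in-sequence MPTCP-level data is available
 */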
static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

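/* create and connect a new outgoing subflow: bind it to the given local
 * address and interface, start the MP_JOIN handshake towards the remote
 * address and link the subflow into the msk join list
 */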
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
		 local_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

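/* create a kernel TCP socket and attach the "mptcp" ULP to it, so that it
 * can be used as a subflow of the given MPTCP socket
 */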
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data-available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

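/* initialize the subflow context of a freshly accepted socket, copying the
 * listener context and the handshake data stored in the request socket
 */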
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

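/* hook MPTCP into the TCP stack: clone the TCP request sock and af ops,
 * override the connection-handling callbacks and register the "mptcp" ULP
 */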
void __init mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}