xref: /openbmc/linux/net/mptcp/protocol.c (revision 89b15863)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #include <net/tcp_states.h>
20 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
21 #include <net/transp_v6.h>
22 #endif
23 #include <net/mptcp.h>
24 #include <net/xfrm.h>
25 #include "protocol.h"
26 #include "mib.h"
27 
28 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
29 struct mptcp6_sock {
30 	struct mptcp_sock msk;
31 	struct ipv6_pinfo np;
32 };
33 #endif
34 
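/* MPTCP-level per-skb state, stored in the skb control block (see
 * MPTCP_SKB_CB() below): the 64-bit data sequence range covered by this skb
 * and the offset of the first not-yet-consumed byte inside it.
 */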
35 struct mptcp_skb_cb {
36 	u64 map_seq;
37 	u64 end_seq;
38 	u32 offset;
39 };
40 
41 #define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
42 
43 static struct percpu_counter mptcp_sockets_allocated;
44 
45 static void __mptcp_destroy_sock(struct sock *sk);
46 static void __mptcp_check_send_data_fin(struct sock *sk);
47 
48 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
49  * completed yet or has failed, return the subflow socket.
50  * Otherwise return NULL.
51  */
52 static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
53 {
54 	if (!msk->subflow || READ_ONCE(msk->can_ack))
55 		return NULL;
56 
57 	return msk->subflow;
58 }
59 
60 /* Returns end sequence number of the receiver's advertised window */
61 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
62 {
63 	return READ_ONCE(msk->wnd_end);
64 }
65 
66 static bool mptcp_is_tcpsk(struct sock *sk)
67 {
68 	struct socket *sock = sk->sk_socket;
69 
70 	if (unlikely(sk->sk_prot == &tcp_prot)) {
71 		/* we are being invoked after mptcp_accept() has
72 		 * accepted a non-mp-capable flow: sk is a tcp_sk,
73 		 * not an mptcp one.
74 		 *
75 		 * Hand the socket over to tcp so all further socket ops
76 		 * bypass mptcp.
77 		 */
78 		sock->ops = &inet_stream_ops;
79 		return true;
80 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
81 	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
82 		sock->ops = &inet6_stream_ops;
83 		return true;
84 #endif
85 	}
86 
87 	return false;
88 }
89 
90 static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
91 {
92 	sock_owned_by_me((const struct sock *)msk);
93 
94 	if (likely(!__mptcp_check_fallback(msk)))
95 		return NULL;
96 
97 	return msk->first;
98 }
99 
100 static int __mptcp_socket_create(struct mptcp_sock *msk)
101 {
102 	struct mptcp_subflow_context *subflow;
103 	struct sock *sk = (struct sock *)msk;
104 	struct socket *ssock;
105 	int err;
106 
107 	err = mptcp_subflow_create_socket(sk, &ssock);
108 	if (err)
109 		return err;
110 
111 	msk->first = ssock->sk;
112 	msk->subflow = ssock;
113 	subflow = mptcp_subflow_ctx(ssock->sk);
114 	list_add(&subflow->node, &msk->conn_list);
115 	sock_hold(ssock->sk);
116 	subflow->request_mptcp = 1;
117 
118 	/* accept() will wait on first subflow sk_wq, and we always wake up
119 	 * via msk->sk_socket
120 	 */
121 	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
122 
123 	return 0;
124 }
125 
126 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
127 {
128 	sk_drops_add(sk, skb);
129 	__kfree_skb(skb);
130 }
131 
132 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
133 			       struct sk_buff *from)
134 {
135 	bool fragstolen;
136 	int delta;
137 
138 	if (MPTCP_SKB_CB(from)->offset ||
139 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
140 		return false;
141 
142 	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx",
143 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
144 		 to->len, MPTCP_SKB_CB(from)->end_seq);
145 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
146 	kfree_skb_partial(from, fragstolen);
147 	atomic_add(delta, &sk->sk_rmem_alloc);
148 	sk_mem_charge(sk, delta);
149 	return true;
150 }
151 
152 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
153 				   struct sk_buff *from)
154 {
155 	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
156 		return false;
157 
158 	return mptcp_try_coalesce((struct sock *)msk, to, from);
159 }
160 
161 /* "inspired" by tcp_data_queue_ofo(), main differences:
162  * - use mptcp seqs
163  * - don't cope with sacks
164  */
165 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
166 {
167 	struct sock *sk = (struct sock *)msk;
168 	struct rb_node **p, *parent;
169 	u64 seq, end_seq, max_seq;
170 	struct sk_buff *skb1;
171 
172 	seq = MPTCP_SKB_CB(skb)->map_seq;
173 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
174 	max_seq = READ_ONCE(msk->rcv_wnd_sent);
175 
176 	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
177 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
178 	if (after64(end_seq, max_seq)) {
179 		/* out of window */
180 		mptcp_drop(sk, skb);
181 		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
182 			 (unsigned long long)end_seq - (unsigned long long)max_seq,
183 			 (unsigned long long)msk->rcv_wnd_sent);
184 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
185 		return;
186 	}
187 
188 	p = &msk->out_of_order_queue.rb_node;
189 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
190 	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
191 		rb_link_node(&skb->rbnode, NULL, p);
192 		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
193 		msk->ooo_last_skb = skb;
194 		goto end;
195 	}
196 
197 	/* with 2 subflows, adding at the end of the ooo queue is quite likely.
198 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
199 	 */
200 	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
201 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
202 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
203 		return;
204 	}
205 
206 	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
207 	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
208 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
209 		parent = &msk->ooo_last_skb->rbnode;
210 		p = &parent->rb_right;
211 		goto insert;
212 	}
213 
214 	/* Find place to insert this segment. Handle overlaps on the way. */
215 	parent = NULL;
216 	while (*p) {
217 		parent = *p;
218 		skb1 = rb_to_skb(parent);
219 		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
220 			p = &parent->rb_left;
221 			continue;
222 		}
223 		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
224 			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
225 				/* All the bits are present. Drop. */
226 				mptcp_drop(sk, skb);
227 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
228 				return;
229 			}
230 			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
231 				/* partial overlap:
232 				 *     |     skb      |
233 				 *  |     skb1    |
234 				 * continue traversing
235 				 */
236 			} else {
237 				/* skb's seq == skb1's seq and skb covers skb1.
238 				 * Replace skb1 with skb.
239 				 */
240 				rb_replace_node(&skb1->rbnode, &skb->rbnode,
241 						&msk->out_of_order_queue);
242 				mptcp_drop(sk, skb1);
243 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
244 				goto merge_right;
245 			}
246 		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
247 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
248 			return;
249 		}
250 		p = &parent->rb_right;
251 	}
252 
253 insert:
254 	/* Insert segment into RB tree. */
255 	rb_link_node(&skb->rbnode, parent, p);
256 	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
257 
258 merge_right:
259 	/* Remove other segments covered by skb. */
260 	while ((skb1 = skb_rb_next(skb)) != NULL) {
261 		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
262 			break;
263 		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
264 		mptcp_drop(sk, skb1);
265 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
266 	}
267 	/* If there is no skb after us, we are the last_skb! */
268 	if (!skb1)
269 		msk->ooo_last_skb = skb;
270 
271 end:
272 	skb_condense(skb);
273 	skb_set_owner_r(skb, sk);
274 }
275 
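/* Move one skb from the subflow receive queue into the msk: charge its
 * memory to the msk (borrowing forward allocation from the subflow when
 * needed), stamp the MPTCP-level sequence numbers into the skb cb, then
 * either append it to the msk receive queue (in-sequence data, possibly
 * coalescing with the tail skb), park it in the out-of-order rbtree, or
 * drop it as a duplicate.  Returns true if in-sequence data reached the
 * receive queue.
 */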
276 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
277 			     struct sk_buff *skb, unsigned int offset,
278 			     size_t copy_len)
279 {
280 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
281 	struct sock *sk = (struct sock *)msk;
282 	struct sk_buff *tail;
283 
284 	__skb_unlink(skb, &ssk->sk_receive_queue);
285 
286 	skb_ext_reset(skb);
287 	skb_orphan(skb);
288 
289 	/* try to fetch required memory from subflow */
290 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
291 		if (ssk->sk_forward_alloc < skb->truesize)
292 			goto drop;
293 		__sk_mem_reclaim(ssk, skb->truesize);
294 		if (!sk_rmem_schedule(sk, skb, skb->truesize))
295 			goto drop;
296 	}
297 
298 	/* the skb map_seq accounts for the skb offset:
299 	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
300 	 * value
301 	 */
302 	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
303 	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
304 	MPTCP_SKB_CB(skb)->offset = offset;
305 
306 	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
307 		/* in sequence */
308 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
309 		tail = skb_peek_tail(&sk->sk_receive_queue);
310 		if (tail && mptcp_try_coalesce(sk, tail, skb))
311 			return true;
312 
313 		skb_set_owner_r(skb, sk);
314 		__skb_queue_tail(&sk->sk_receive_queue, skb);
315 		return true;
316 	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
317 		mptcp_data_queue_ofo(msk, skb);
318 		return false;
319 	}
320 
321 	/* old data, keep it simple and drop the whole pkt, sender
322 	 * will retransmit it as needed.
323 	 */
324 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
325 drop:
326 	mptcp_drop(sk, skb);
327 	return false;
328 }
329 
330 static void mptcp_stop_timer(struct sock *sk)
331 {
332 	struct inet_connection_sock *icsk = inet_csk(sk);
333 
334 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
335 	mptcp_sk(sk)->timer_ival = 0;
336 }
337 
338 static void mptcp_close_wake_up(struct sock *sk)
339 {
340 	if (sock_flag(sk, SOCK_DEAD))
341 		return;
342 
343 	sk->sk_state_change(sk);
344 	if (sk->sk_shutdown == SHUTDOWN_MASK ||
345 	    sk->sk_state == TCP_CLOSE)
346 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
347 	else
348 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
349 }
350 
351 static bool mptcp_pending_data_fin_ack(struct sock *sk)
352 {
353 	struct mptcp_sock *msk = mptcp_sk(sk);
354 
355 	return !__mptcp_check_fallback(msk) &&
356 	       ((1 << sk->sk_state) &
357 		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
358 	       msk->write_seq == READ_ONCE(msk->snd_una);
359 }
360 
361 static void mptcp_check_data_fin_ack(struct sock *sk)
362 {
363 	struct mptcp_sock *msk = mptcp_sk(sk);
364 
365 	/* Look for an acknowledged DATA_FIN */
366 	if (mptcp_pending_data_fin_ack(sk)) {
367 		mptcp_stop_timer(sk);
368 
369 		WRITE_ONCE(msk->snd_data_fin_enable, 0);
370 
371 		switch (sk->sk_state) {
372 		case TCP_FIN_WAIT1:
373 			inet_sk_state_store(sk, TCP_FIN_WAIT2);
374 			break;
375 		case TCP_CLOSING:
376 		case TCP_LAST_ACK:
377 			inet_sk_state_store(sk, TCP_CLOSE);
378 			break;
379 		}
380 
381 		mptcp_close_wake_up(sk);
382 	}
383 }
384 
385 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
386 {
387 	struct mptcp_sock *msk = mptcp_sk(sk);
388 
389 	if (READ_ONCE(msk->rcv_data_fin) &&
390 	    ((1 << sk->sk_state) &
391 	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
392 		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
393 
394 		if (msk->ack_seq == rcv_data_fin_seq) {
395 			if (seq)
396 				*seq = rcv_data_fin_seq;
397 
398 			return true;
399 		}
400 	}
401 
402 	return false;
403 }
404 
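/* Refresh the msk-level retransmit interval: prefer the time left on the
 * given subflow's pending ICSK timer, else keep the previously cached
 * value, falling back to TCP_RTO_MIN when neither is available.
 */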
405 static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
406 {
407 	long tout = ssk && inet_csk(ssk)->icsk_pending ?
408 				      inet_csk(ssk)->icsk_timeout - jiffies : 0;
409 
410 	if (tout <= 0)
411 		tout = mptcp_sk(sk)->timer_ival;
412 	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
413 }
414 
415 static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
416 {
417 	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
418 
419 	/* can't send if JOIN hasn't completed yet (i.e. the subflow is not yet usable for mptcp) */
420 	if (subflow->request_join && !subflow->fully_established)
421 		return false;
422 
423 	/* only send if our side has not closed yet */
424 	return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
425 }
426 
427 static bool tcp_can_send_ack(const struct sock *ssk)
428 {
429 	return !((1 << inet_sk_state_load(ssk)) &
430 	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE));
431 }
432 
433 static void mptcp_send_ack(struct mptcp_sock *msk)
434 {
435 	struct mptcp_subflow_context *subflow;
436 
437 	mptcp_for_each_subflow(msk, subflow) {
438 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
439 
440 		lock_sock(ssk);
441 		if (tcp_can_send_ack(ssk))
442 			tcp_send_ack(ssk);
443 		release_sock(ssk);
444 	}
445 }
446 
447 static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
448 {
449 	int ret;
450 
451 	lock_sock(ssk);
452 	ret = tcp_can_send_ack(ssk);
453 	if (ret)
454 		tcp_cleanup_rbuf(ssk, 1);
455 	release_sock(ssk);
456 	return ret;
457 }
458 
459 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
460 {
461 	struct sock *ack_hint = READ_ONCE(msk->ack_hint);
462 	struct mptcp_subflow_context *subflow;
463 
464 	/* if the hinted ssk is still active, try to use it */
465 	if (likely(ack_hint)) {
466 		mptcp_for_each_subflow(msk, subflow) {
467 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
468 
469 			if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk))
470 				return;
471 		}
472 	}
473 
474 	/* otherwise pick the first active subflow */
475 	mptcp_for_each_subflow(msk, subflow)
476 		if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow)))
477 			return;
478 }
479 
480 static bool mptcp_check_data_fin(struct sock *sk)
481 {
482 	struct mptcp_sock *msk = mptcp_sk(sk);
483 	u64 rcv_data_fin_seq;
484 	bool ret = false;
485 
486 	if (__mptcp_check_fallback(msk) || !msk->first)
487 		return ret;
488 
489 	/* Need to ack a DATA_FIN received from a peer while this side
490 	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
491 	 * msk->rcv_data_fin was set when parsing the incoming options
492 	 * at the subflow level and the msk lock was not held, so this
493 	 * is the first opportunity to act on the DATA_FIN and change
494 	 * the msk state.
495 	 *
496 	 * If we are caught up to the sequence number of the incoming
497 	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
498 	 * not caught up, do nothing and let the recv code send DATA_ACK
499 	 * when catching up.
500 	 */
501 
502 	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
503 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
504 		WRITE_ONCE(msk->rcv_data_fin, 0);
505 
506 		sk->sk_shutdown |= RCV_SHUTDOWN;
507 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
508 		set_bit(MPTCP_DATA_READY, &msk->flags);
509 
510 		switch (sk->sk_state) {
511 		case TCP_ESTABLISHED:
512 			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
513 			break;
514 		case TCP_FIN_WAIT1:
515 			inet_sk_state_store(sk, TCP_CLOSING);
516 			break;
517 		case TCP_FIN_WAIT2:
518 			inet_sk_state_store(sk, TCP_CLOSE);
519 			break;
520 		default:
521 			/* Other states not expected */
522 			WARN_ON_ONCE(1);
523 			break;
524 		}
525 
526 		ret = true;
527 		mptcp_set_timeout(sk, NULL);
528 		mptcp_send_ack(msk);
529 		mptcp_close_wake_up(sk);
530 	}
531 	return ret;
532 }
533 
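/* Drain in-sequence data from the subflow receive queue into the msk,
 * honouring the current MPTCP mapping and growing sk_rcvbuf to at least the
 * subflow's value when autotuning is allowed.  Updates *bytes with the
 * amount moved and returns true when no further progress can be made (no
 * more data, FIN/urgent data, or msk receive buffer full).
 */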
534 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
535 					   struct sock *ssk,
536 					   unsigned int *bytes)
537 {
538 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
539 	struct sock *sk = (struct sock *)msk;
540 	unsigned int moved = 0;
541 	bool more_data_avail;
542 	struct tcp_sock *tp;
543 	bool done = false;
544 	int sk_rbuf;
545 
546 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
547 
548 	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
549 		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
550 
551 		if (unlikely(ssk_rbuf > sk_rbuf)) {
552 			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
553 			sk_rbuf = ssk_rbuf;
554 		}
555 	}
556 
557 	pr_debug("msk=%p ssk=%p", msk, ssk);
558 	tp = tcp_sk(ssk);
559 	do {
560 		u32 map_remaining, offset;
561 		u32 seq = tp->copied_seq;
562 		struct sk_buff *skb;
563 		bool fin;
564 
565 		/* try to move as much data as available */
566 		map_remaining = subflow->map_data_len -
567 				mptcp_subflow_get_map_offset(subflow);
568 
569 		skb = skb_peek(&ssk->sk_receive_queue);
570 		if (!skb) {
571 			/* if no data is found, a racing workqueue/recvmsg
572 			 * already processed the new data, stop here or we
573 			 * can enter an infinite loop
574 			 */
575 			if (!moved)
576 				done = true;
577 			break;
578 		}
579 
580 		if (__mptcp_check_fallback(msk)) {
581 			/* if we are running under the workqueue, TCP could have
582 			 * collapsed skbs between dummy map creation and now,
583 			 * so be sure to adjust the size
584 			 */
585 			map_remaining = skb->len;
586 			subflow->map_data_len = skb->len;
587 		}
588 
589 		offset = seq - TCP_SKB_CB(skb)->seq;
590 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
591 		if (fin) {
592 			done = true;
593 			seq++;
594 		}
595 
596 		if (offset < skb->len) {
597 			size_t len = skb->len - offset;
598 
599 			if (tp->urg_data)
600 				done = true;
601 
602 			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
603 				moved += len;
604 			seq += len;
605 
606 			if (WARN_ON_ONCE(map_remaining < len))
607 				break;
608 		} else {
609 			WARN_ON_ONCE(!fin);
610 			sk_eat_skb(ssk, skb);
611 			done = true;
612 		}
613 
614 		WRITE_ONCE(tp->copied_seq, seq);
615 		more_data_avail = mptcp_subflow_data_available(ssk);
616 
617 		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
618 			done = true;
619 			break;
620 		}
621 	} while (more_data_avail);
622 	WRITE_ONCE(msk->ack_hint, ssk);
623 
624 	*bytes += moved;
625 	return done;
626 }
627 
628 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
629 {
630 	struct sock *sk = (struct sock *)msk;
631 	struct sk_buff *skb, *tail;
632 	bool moved = false;
633 	struct rb_node *p;
634 	u64 end_seq;
635 
636 	p = rb_first(&msk->out_of_order_queue);
637 	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
638 	while (p) {
639 		skb = rb_to_skb(p);
640 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
641 			break;
642 
643 		p = rb_next(p);
644 		rb_erase(&skb->rbnode, &msk->out_of_order_queue);
645 
646 		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
647 				      msk->ack_seq))) {
648 			mptcp_drop(sk, skb);
649 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
650 			continue;
651 		}
652 
653 		end_seq = MPTCP_SKB_CB(skb)->end_seq;
654 		tail = skb_peek_tail(&sk->sk_receive_queue);
655 		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
656 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
657 
658 			/* skip overlapping data, if any */
659 			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
660 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
661 				 delta);
662 			MPTCP_SKB_CB(skb)->offset += delta;
663 			__skb_queue_tail(&sk->sk_receive_queue, skb);
664 		}
665 		msk->ack_seq = end_seq;
666 		moved = true;
667 	}
668 	return moved;
669 }
670 
671 /* In most cases we will be able to lock the mptcp socket.  If it's already
672  * owned, we need to defer to the work queue to avoid ABBA deadlock.
673  */
674 static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
675 {
676 	struct sock *sk = (struct sock *)msk;
677 	unsigned int moved = 0;
678 
679 	if (inet_sk_state_load(sk) == TCP_CLOSE)
680 		return;
681 
682 	mptcp_data_lock(sk);
683 
684 	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
685 	__mptcp_ofo_queue(msk);
686 
687 	/* If the moves have caught up with the DATA_FIN sequence number
688 	 * it's time to ack the DATA_FIN and change socket state, but
689 	 * this is not a good place to change state. Let the workqueue
690 	 * do it.
691 	 */
692 	if (mptcp_pending_data_fin(sk, NULL))
693 		mptcp_schedule_work(sk);
694 	mptcp_data_unlock(sk);
695 }
696 
697 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
698 {
699 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
700 	struct mptcp_sock *msk = mptcp_sk(sk);
701 	int sk_rbuf, ssk_rbuf;
702 	bool wake;
703 
704 	/* The peer can send data while we are shutting down this
705 	 * subflow at msk destruction time, but we must avoid enqueuing
706 	 * more data to the msk receive queue
707 	 */
708 	if (unlikely(subflow->disposable))
709 		return;
710 
711 	/* move_skbs_to_msk below can legitimately clear the data_avail flag,
712 	 * but we will need it later to properly wake the reader; cache its
713 	 * value
714 	 */
715 	wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
716 	if (wake)
717 		set_bit(MPTCP_DATA_READY, &msk->flags);
718 
719 	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
720 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
721 	if (unlikely(ssk_rbuf > sk_rbuf))
722 		sk_rbuf = ssk_rbuf;
723 
724 	/* over limit? can't append more skbs to msk */
725 	if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
726 		goto wake;
727 
728 	move_skbs_to_msk(msk, ssk);
729 
730 wake:
731 	if (wake)
732 		sk->sk_data_ready(sk);
733 }
734 
735 void __mptcp_flush_join_list(struct mptcp_sock *msk)
736 {
737 	if (likely(list_empty(&msk->join_list)))
738 		return;
739 
740 	spin_lock_bh(&msk->join_list_lock);
741 	list_splice_tail_init(&msk->join_list, &msk->conn_list);
742 	spin_unlock_bh(&msk->join_list_lock);
743 }
744 
745 static bool mptcp_timer_pending(struct sock *sk)
746 {
747 	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
748 }
749 
750 static void mptcp_reset_timer(struct sock *sk)
751 {
752 	struct inet_connection_sock *icsk = inet_csk(sk);
753 	unsigned long tout;
754 
755 	/* prevent rescheduling on close */
756 	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
757 		return;
758 
759 	/* should never be called with mptcp level timer cleared */
760 	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
761 	if (WARN_ON_ONCE(!tout))
762 		tout = TCP_RTO_MIN;
763 	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
764 }
765 
766 bool mptcp_schedule_work(struct sock *sk)
767 {
768 	if (inet_sk_state_load(sk) != TCP_CLOSE &&
769 	    schedule_work(&mptcp_sk(sk)->work)) {
770 		/* each subflow already holds a reference to the sk, and the
771 		 * workqueue is invoked by a subflow, so sk can't go away here.
772 		 */
773 		sock_hold(sk);
774 		return true;
775 	}
776 	return false;
777 }
778 
779 void mptcp_subflow_eof(struct sock *sk)
780 {
781 	if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
782 		mptcp_schedule_work(sk);
783 }
784 
785 static void mptcp_check_for_eof(struct mptcp_sock *msk)
786 {
787 	struct mptcp_subflow_context *subflow;
788 	struct sock *sk = (struct sock *)msk;
789 	int receivers = 0;
790 
791 	mptcp_for_each_subflow(msk, subflow)
792 		receivers += !subflow->rx_eof;
793 	if (receivers)
794 		return;
795 
796 	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
797 		/* hopefully temporary hack: propagate shutdown status
798 		 * to msk, when all subflows agree on it
799 		 */
800 		sk->sk_shutdown |= RCV_SHUTDOWN;
801 
802 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
803 		set_bit(MPTCP_DATA_READY, &msk->flags);
804 		sk->sk_data_ready(sk);
805 	}
806 
807 	switch (sk->sk_state) {
808 	case TCP_ESTABLISHED:
809 		inet_sk_state_store(sk, TCP_CLOSE_WAIT);
810 		break;
811 	case TCP_FIN_WAIT1:
812 		inet_sk_state_store(sk, TCP_CLOSING);
813 		break;
814 	case TCP_FIN_WAIT2:
815 		inet_sk_state_store(sk, TCP_CLOSE);
816 		break;
817 	default:
818 		return;
819 	}
820 	mptcp_close_wake_up(sk);
821 }
822 
823 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
824 {
825 	struct mptcp_subflow_context *subflow;
826 	struct sock *sk = (struct sock *)msk;
827 
828 	sock_owned_by_me(sk);
829 
830 	mptcp_for_each_subflow(msk, subflow) {
831 		if (subflow->data_avail)
832 			return mptcp_subflow_tcp_sock(subflow);
833 	}
834 
835 	return NULL;
836 }
837 
838 static bool mptcp_skb_can_collapse_to(u64 write_seq,
839 				      const struct sk_buff *skb,
840 				      const struct mptcp_ext *mpext)
841 {
842 	if (!tcp_skb_can_collapse_to(skb))
843 		return false;
844 
845 	/* can collapse only if MPTCP level sequence is in order and this
846 	 * mapping has not been xmitted yet
847 	 */
848 	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
849 	       !mpext->frozen;
850 }
851 
852 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
853 				       const struct page_frag *pfrag,
854 				       const struct mptcp_data_frag *df)
855 {
856 	return df && pfrag->page == df->page &&
857 		pfrag->size - pfrag->offset > 0 &&
858 		df->data_seq + df->data_len == msk->write_seq;
859 }
860 
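/* Estimate the forward memory needed to enqueue @size bytes of new data:
 * the payload itself, roughly one struct mptcp_data_frag per page of data,
 * plus the truesize of the tx skb headers we expect to need beyond those
 * already sitting in the msk tx skb cache.
 */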
861 static int mptcp_wmem_with_overhead(struct sock *sk, int size)
862 {
863 	struct mptcp_sock *msk = mptcp_sk(sk);
864 	int ret, skbs;
865 
866 	ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
867 	skbs = (msk->tx_pending_data + size) / msk->size_goal_cache;
868 	if (skbs < msk->skb_tx_cache.qlen)
869 		return ret;
870 
871 	return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER);
872 }
873 
874 static void __mptcp_wmem_reserve(struct sock *sk, int size)
875 {
876 	int amount = mptcp_wmem_with_overhead(sk, size);
877 	struct mptcp_sock *msk = mptcp_sk(sk);
878 
879 	WARN_ON_ONCE(msk->wmem_reserved);
880 	if (amount <= sk->sk_forward_alloc)
881 		goto reserve;
882 
883 	/* under memory pressure try to reserve at most a single page,
884 	 * otherwise try to reserve the full estimate and fall back
885 	 * to a single page before entering the error path
886 	 */
887 	if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) ||
888 	    !sk_wmem_schedule(sk, amount)) {
889 		if (amount <= PAGE_SIZE)
890 			goto nomem;
891 
892 		amount = PAGE_SIZE;
893 		if (!sk_wmem_schedule(sk, amount))
894 			goto nomem;
895 	}
896 
897 reserve:
898 	msk->wmem_reserved = amount;
899 	sk->sk_forward_alloc -= amount;
900 	return;
901 
902 nomem:
903 	/* we will wait for memory on next allocation */
904 	msk->wmem_reserved = -1;
905 }
906 
907 static void __mptcp_update_wmem(struct sock *sk)
908 {
909 	struct mptcp_sock *msk = mptcp_sk(sk);
910 
911 	if (!msk->wmem_reserved)
912 		return;
913 
914 	if (msk->wmem_reserved < 0)
915 		msk->wmem_reserved = 0;
916 	if (msk->wmem_reserved > 0) {
917 		sk->sk_forward_alloc += msk->wmem_reserved;
918 		msk->wmem_reserved = 0;
919 	}
920 }
921 
922 static bool mptcp_wmem_alloc(struct sock *sk, int size)
923 {
924 	struct mptcp_sock *msk = mptcp_sk(sk);
925 
926 	/* check for pre-existing error condition */
927 	if (msk->wmem_reserved < 0)
928 		return false;
929 
930 	if (msk->wmem_reserved >= size)
931 		goto account;
932 
933 	mptcp_data_lock(sk);
934 	if (!sk_wmem_schedule(sk, size)) {
935 		mptcp_data_unlock(sk);
936 		return false;
937 	}
938 
939 	sk->sk_forward_alloc -= size;
940 	msk->wmem_reserved += size;
941 	mptcp_data_unlock(sk);
942 
943 account:
944 	msk->wmem_reserved -= size;
945 	return true;
946 }
947 
948 static void mptcp_wmem_uncharge(struct sock *sk, int size)
949 {
950 	struct mptcp_sock *msk = mptcp_sk(sk);
951 
952 	if (msk->wmem_reserved < 0)
953 		msk->wmem_reserved = 0;
954 	msk->wmem_reserved += size;
955 }
956 
957 static void mptcp_mem_reclaim_partial(struct sock *sk)
958 {
959 	struct mptcp_sock *msk = mptcp_sk(sk);
960 
961 	/* if we are experiencing a transient allocation error,
962 	 * the forward allocation memory has been already
963 	 * released
964 	 */
965 	if (msk->wmem_reserved < 0)
966 		return;
967 
968 	mptcp_data_lock(sk);
969 	sk->sk_forward_alloc += msk->wmem_reserved;
970 	sk_mem_reclaim_partial(sk);
971 	msk->wmem_reserved = sk->sk_forward_alloc;
972 	sk->sk_forward_alloc = 0;
973 	mptcp_data_unlock(sk);
974 }
975 
976 static void dfrag_uncharge(struct sock *sk, int len)
977 {
978 	sk_mem_uncharge(sk, len);
979 	sk_wmem_queued_add(sk, -len);
980 }
981 
982 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
983 {
984 	int len = dfrag->data_len + dfrag->overhead;
985 
986 	list_del(&dfrag->list);
987 	dfrag_uncharge(sk, len);
988 	put_page(dfrag->page);
989 }
990 
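/* Reclaim rtx queue entries that are fully acked at the MPTCP level, trim
 * the head dfrag when it is only partially acked, and update memory
 * accounting, write space and the retransmit timer accordingly.
 */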
991 static void __mptcp_clean_una(struct sock *sk)
992 {
993 	struct mptcp_sock *msk = mptcp_sk(sk);
994 	struct mptcp_data_frag *dtmp, *dfrag;
995 	bool cleaned = false;
996 	u64 snd_una;
997 
998 	/* on fallback we just need to ignore snd_una, as this is really
999 	 * plain TCP
1000 	 */
1001 	if (__mptcp_check_fallback(msk))
1002 		msk->snd_una = READ_ONCE(msk->snd_nxt);
1003 
1004 	snd_una = msk->snd_una;
1005 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1006 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1007 			break;
1008 
1009 		if (WARN_ON_ONCE(dfrag == msk->first_pending))
1010 			break;
1011 		dfrag_clear(sk, dfrag);
1012 		cleaned = true;
1013 	}
1014 
1015 	dfrag = mptcp_rtx_head(sk);
1016 	if (dfrag && after64(snd_una, dfrag->data_seq)) {
1017 		u64 delta = snd_una - dfrag->data_seq;
1018 
1019 		if (WARN_ON_ONCE(delta > dfrag->already_sent))
1020 			goto out;
1021 
1022 		dfrag->data_seq += delta;
1023 		dfrag->offset += delta;
1024 		dfrag->data_len -= delta;
1025 		dfrag->already_sent -= delta;
1026 
1027 		dfrag_uncharge(sk, delta);
1028 		cleaned = true;
1029 	}
1030 
1031 out:
1032 	if (cleaned) {
1033 		if (tcp_under_memory_pressure(sk)) {
1034 			__mptcp_update_wmem(sk);
1035 			sk_mem_reclaim_partial(sk);
1036 		}
1037 
1038 		if (sk_stream_is_writeable(sk)) {
1039 			/* pairs with memory barrier in mptcp_poll */
1040 			smp_mb();
1041 			if (test_and_clear_bit(MPTCP_NOSPACE, &msk->flags))
1042 				sk_stream_write_space(sk);
1043 		}
1044 	}
1045 
1046 	if (snd_una == READ_ONCE(msk->snd_nxt)) {
1047 		if (msk->timer_ival)
1048 			mptcp_stop_timer(sk);
1049 	} else {
1050 		mptcp_reset_timer(sk);
1051 	}
1052 }
1053 
1054 static void mptcp_enter_memory_pressure(struct sock *sk)
1055 {
1056 	struct mptcp_subflow_context *subflow;
1057 	struct mptcp_sock *msk = mptcp_sk(sk);
1058 	bool first = true;
1059 
1060 	sk_stream_moderate_sndbuf(sk);
1061 	mptcp_for_each_subflow(msk, subflow) {
1062 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1063 
1064 		if (first)
1065 			tcp_enter_memory_pressure(ssk);
1066 		sk_stream_moderate_sndbuf(ssk);
1067 		first = false;
1068 	}
1069 }
1070 
1071 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1072  * data
1073  */
1074 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1075 {
1076 	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
1077 					pfrag, sk->sk_allocation)))
1078 		return true;
1079 
1080 	mptcp_enter_memory_pressure(sk);
1081 	return false;
1082 }
1083 
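/* Carve a new dfrag descriptor out of the current page frag: the
 * struct mptcp_data_frag itself is placed at the long-aligned current
 * offset and the payload will follow it, so dfrag->overhead covers both
 * the alignment padding and the descriptor itself.
 */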
1084 static struct mptcp_data_frag *
1085 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1086 		      int orig_offset)
1087 {
1088 	int offset = ALIGN(orig_offset, sizeof(long));
1089 	struct mptcp_data_frag *dfrag;
1090 
1091 	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
1092 	dfrag->data_len = 0;
1093 	dfrag->data_seq = msk->write_seq;
1094 	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
1095 	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1096 	dfrag->already_sent = 0;
1097 	dfrag->page = pfrag->page;
1098 
1099 	return dfrag;
1100 }
1101 
1102 struct mptcp_sendmsg_info {
1103 	int mss_now;
1104 	int size_goal;
1105 	u16 limit;
1106 	u16 sent;
1107 	unsigned int flags;
1108 };
1109 
1110 static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
1111 				    int avail_size)
1112 {
1113 	u64 window_end = mptcp_wnd_end(msk);
1114 
1115 	if (__mptcp_check_fallback(msk))
1116 		return avail_size;
1117 
1118 	if (!before64(data_seq + avail_size, window_end)) {
1119 		u64 allowed_size = window_end - data_seq;
1120 
1121 		return min_t(unsigned int, allowed_size, avail_size);
1122 	}
1123 
1124 	return avail_size;
1125 }
1126 
1127 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
1128 {
1129 	struct skb_ext *mpext = __skb_ext_alloc(gfp);
1130 
1131 	if (!mpext)
1132 		return false;
1133 	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
1134 	return true;
1135 }
1136 
1137 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1138 {
1139 	struct sk_buff *skb;
1140 
1141 	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1142 	if (likely(skb)) {
1143 		if (likely(__mptcp_add_ext(skb, gfp))) {
1144 			skb_reserve(skb, MAX_TCP_HEADER);
1145 			skb->reserved_tailroom = skb->end - skb->tail;
1146 			return skb;
1147 		}
1148 		__kfree_skb(skb);
1149 	} else {
1150 		mptcp_enter_memory_pressure(sk);
1151 	}
1152 	return NULL;
1153 }
1154 
1155 static bool mptcp_tx_cache_refill(struct sock *sk, int size,
1156 				  struct sk_buff_head *skbs, int *total_ts)
1157 {
1158 	struct mptcp_sock *msk = mptcp_sk(sk);
1159 	struct sk_buff *skb;
1160 	int space_needed;
1161 
1162 	if (unlikely(tcp_under_memory_pressure(sk))) {
1163 		mptcp_mem_reclaim_partial(sk);
1164 
1165 		/* under pressure pre-allocate at most a single skb */
1166 		if (msk->skb_tx_cache.qlen)
1167 			return true;
1168 		space_needed = msk->size_goal_cache;
1169 	} else {
1170 		space_needed = msk->tx_pending_data + size -
1171 			       msk->skb_tx_cache.qlen * msk->size_goal_cache;
1172 	}
1173 
1174 	while (space_needed > 0) {
1175 		skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation);
1176 		if (unlikely(!skb)) {
1177 			/* under memory pressure, try to pass the caller a
1178 			 * single skb to allow forward progress
1179 			 */
1180 			while (skbs->qlen > 1) {
1181 				skb = __skb_dequeue_tail(skbs);
1182 				__kfree_skb(skb);
1183 			}
1184 			return skbs->qlen > 0;
1185 		}
1186 
1187 		*total_ts += skb->truesize;
1188 		__skb_queue_tail(skbs, skb);
1189 		space_needed -= msk->size_goal_cache;
1190 	}
1191 	return true;
1192 }
1193 
1194 static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1195 {
1196 	struct mptcp_sock *msk = mptcp_sk(sk);
1197 	struct sk_buff *skb;
1198 
1199 	if (ssk->sk_tx_skb_cache) {
1200 		skb = ssk->sk_tx_skb_cache;
1201 		if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
1202 			     !__mptcp_add_ext(skb, gfp)))
1203 			return false;
1204 		return true;
1205 	}
1206 
1207 	skb = skb_peek(&msk->skb_tx_cache);
1208 	if (skb) {
1209 		if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1210 			skb = __skb_dequeue(&msk->skb_tx_cache);
1211 			if (WARN_ON_ONCE(!skb))
1212 				return false;
1213 
1214 			mptcp_wmem_uncharge(sk, skb->truesize);
1215 			ssk->sk_tx_skb_cache = skb;
1216 			return true;
1217 		}
1218 
1219 		/* over memory limit, no point to try to allocate a new skb */
1220 		return false;
1221 	}
1222 
1223 	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1224 	if (!skb)
1225 		return false;
1226 
1227 	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1228 		ssk->sk_tx_skb_cache = skb;
1229 		return true;
1230 	}
1231 	kfree_skb(skb);
1232 	return false;
1233 }
1234 
1235 static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
1236 {
1237 	return !ssk->sk_tx_skb_cache &&
1238 	       !skb_peek(&mptcp_sk(sk)->skb_tx_cache) &&
1239 	       tcp_under_memory_pressure(sk);
1240 }
1241 
1242 static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
1243 {
1244 	if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
1245 		mptcp_mem_reclaim_partial(sk);
1246 	return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
1247 }
1248 
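/* Push (part of) a dfrag onto the given subflow: extend the tail skb when
 * the DSS mapping allows collapsing, otherwise build a new fragment via
 * tcp_build_frag(), fill in the MPTCP extension (data_seq/subflow_seq/
 * data_len) and advance the subflow relative write sequence.  Returns the
 * number of bytes queued, 0 when the peer window only allows a zero-window
 * probe or nothing can be sent, or a negative error.
 */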
1249 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1250 			      struct mptcp_data_frag *dfrag,
1251 			      struct mptcp_sendmsg_info *info)
1252 {
1253 	u64 data_seq = dfrag->data_seq + info->sent;
1254 	struct mptcp_sock *msk = mptcp_sk(sk);
1255 	bool zero_window_probe = false;
1256 	struct mptcp_ext *mpext = NULL;
1257 	struct sk_buff *skb, *tail;
1258 	bool can_collapse = false;
1259 	int size_bias = 0;
1260 	int avail_size;
1261 	size_t ret = 0;
1262 
1263 	pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
1264 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1265 
1266 	/* compute send limit */
1267 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1268 	avail_size = info->size_goal;
1269 	msk->size_goal_cache = info->size_goal;
1270 	skb = tcp_write_queue_tail(ssk);
1271 	if (skb) {
1272 		/* Limit the write to the size available in the
1273 		 * current skb, if any, so that we create at most one new skb.
1274 		 * Explicitly tells TCP internals to avoid collapsing on later
1275 		 * queue management operation, to avoid breaking the ext <->
1276 		 * SSN association set here
1277 		 */
1278 		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
1279 		can_collapse = (info->size_goal - skb->len > 0) &&
1280 			 mptcp_skb_can_collapse_to(data_seq, skb, mpext);
1281 		if (!can_collapse) {
1282 			TCP_SKB_CB(skb)->eor = 1;
1283 		} else {
1284 			size_bias = skb->len;
1285 			avail_size = info->size_goal - skb->len;
1286 		}
1287 	}
1288 
1289 	/* Zero window and all data acked? Probe. */
1290 	avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
1291 	if (avail_size == 0) {
1292 		u64 snd_una = READ_ONCE(msk->snd_una);
1293 
1294 		if (skb || snd_una != msk->snd_nxt)
1295 			return 0;
1296 		zero_window_probe = true;
1297 		data_seq = snd_una - 1;
1298 		avail_size = 1;
1299 	}
1300 
1301 	if (WARN_ON_ONCE(info->sent > info->limit ||
1302 			 info->limit > dfrag->data_len))
1303 		return 0;
1304 
1305 	ret = info->limit - info->sent;
1306 	tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
1307 			      dfrag->page, dfrag->offset + info->sent, &ret);
1308 	if (!tail) {
1309 		tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
1310 		return -ENOMEM;
1311 	}
1312 
1313 	/* if the tail skb is still the cached one, collapsing really happened.
1314 	 */
1315 	if (skb == tail) {
1316 		TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
1317 		mpext->data_len += ret;
1318 		WARN_ON_ONCE(!can_collapse);
1319 		WARN_ON_ONCE(zero_window_probe);
1320 		goto out;
1321 	}
1322 
1323 	mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
1324 	if (WARN_ON_ONCE(!mpext)) {
1325 		/* should never reach here, stream corrupted */
1326 		return -EINVAL;
1327 	}
1328 
1329 	memset(mpext, 0, sizeof(*mpext));
1330 	mpext->data_seq = data_seq;
1331 	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1332 	mpext->data_len = ret;
1333 	mpext->use_map = 1;
1334 	mpext->dsn64 = 1;
1335 
1336 	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
1337 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1338 		 mpext->dsn64);
1339 
1340 	if (zero_window_probe) {
1341 		mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
1342 		mpext->frozen = 1;
1343 		ret = 0;
1344 		tcp_push_pending_frames(ssk);
1345 	}
1346 out:
1347 	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
1348 	return ret;
1349 }
1350 
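/* Burst quota granted to the selected subflow by the packet scheduler:
 * 64KB minus the worst-case TCP, TCP option, IPv6 and fragment header
 * overhead, so that a full burst plus headers stays within 64KB.
 */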
1351 #define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
1352 					 sizeof(struct tcphdr) - \
1353 					 MAX_TCP_OPTION_SPACE - \
1354 					 sizeof(struct ipv6hdr) - \
1355 					 sizeof(struct frag_hdr))
1356 
1357 struct subflow_send_info {
1358 	struct sock *ssk;
1359 	u64 ratio;
1360 };
1361 
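/* Packet scheduler: on fallback sockets always use the first subflow.
 * Otherwise keep using the last selected subflow while its burst quota and
 * write space last; when a new pick is needed, choose, among regular and
 * backup subflows separately, the one with the lowest
 * wmem_queued / pacing_rate ratio, preferring regular subflows whenever one
 * is active.  Also reports the largest subflow snd_wnd via *sndbuf for
 * msk-level send buffer autotuning.
 */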
1362 static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
1363 					   u32 *sndbuf)
1364 {
1365 	struct subflow_send_info send_info[2];
1366 	struct mptcp_subflow_context *subflow;
1367 	int i, nr_active = 0;
1368 	struct sock *ssk;
1369 	u64 ratio;
1370 	u32 pace;
1371 
1372 	sock_owned_by_me((struct sock *)msk);
1373 
1374 	*sndbuf = 0;
1375 	if (__mptcp_check_fallback(msk)) {
1376 		if (!msk->first)
1377 			return NULL;
1378 		*sndbuf = msk->first->sk_sndbuf;
1379 		return sk_stream_memory_free(msk->first) ? msk->first : NULL;
1380 	}
1381 
1382 	/* re-use the last subflow, if the burst allows that */
1383 	if (msk->last_snd && msk->snd_burst > 0 &&
1384 	    sk_stream_memory_free(msk->last_snd) &&
1385 	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
1386 		mptcp_for_each_subflow(msk, subflow) {
1387 			ssk =  mptcp_subflow_tcp_sock(subflow);
1388 			*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
1389 		}
1390 		return msk->last_snd;
1391 	}
1392 
1393 	/* pick the subflow with the lowest wmem/wspace ratio */
1394 	for (i = 0; i < 2; ++i) {
1395 		send_info[i].ssk = NULL;
1396 		send_info[i].ratio = -1;
1397 	}
1398 	mptcp_for_each_subflow(msk, subflow) {
1399 		ssk =  mptcp_subflow_tcp_sock(subflow);
1400 		if (!mptcp_subflow_active(subflow))
1401 			continue;
1402 
1403 		nr_active += !subflow->backup;
1404 		*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
1405 		if (!sk_stream_memory_free(subflow->tcp_sock))
1406 			continue;
1407 
1408 		pace = READ_ONCE(ssk->sk_pacing_rate);
1409 		if (!pace)
1410 			continue;
1411 
1412 		ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
1413 				pace);
1414 		if (ratio < send_info[subflow->backup].ratio) {
1415 			send_info[subflow->backup].ssk = ssk;
1416 			send_info[subflow->backup].ratio = ratio;
1417 		}
1418 	}
1419 
1420 	pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
1421 		 msk, nr_active, send_info[0].ssk, send_info[0].ratio,
1422 		 send_info[1].ssk, send_info[1].ratio);
1423 
1424 	/* pick the best backup if no other subflow is active */
1425 	if (!nr_active)
1426 		send_info[0].ssk = send_info[1].ssk;
1427 
1428 	if (send_info[0].ssk) {
1429 		msk->last_snd = send_info[0].ssk;
1430 		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
1431 				       sk_stream_wspace(msk->last_snd));
1432 		return msk->last_snd;
1433 	}
1434 	return NULL;
1435 }
1436 
1437 static void mptcp_push_release(struct sock *sk, struct sock *ssk,
1438 			       struct mptcp_sendmsg_info *info)
1439 {
1440 	mptcp_set_timeout(sk, ssk);
1441 	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1442 	release_sock(ssk);
1443 }
1444 
1445 static void mptcp_push_pending(struct sock *sk, unsigned int flags)
1446 {
1447 	struct sock *prev_ssk = NULL, *ssk = NULL;
1448 	struct mptcp_sock *msk = mptcp_sk(sk);
1449 	struct mptcp_sendmsg_info info = {
1450 				.flags = flags,
1451 	};
1452 	struct mptcp_data_frag *dfrag;
1453 	int len, copied = 0;
1454 	u32 sndbuf;
1455 
1456 	while ((dfrag = mptcp_send_head(sk))) {
1457 		info.sent = dfrag->already_sent;
1458 		info.limit = dfrag->data_len;
1459 		len = dfrag->data_len - dfrag->already_sent;
1460 		while (len > 0) {
1461 			int ret = 0;
1462 
1463 			prev_ssk = ssk;
1464 			__mptcp_flush_join_list(msk);
1465 			ssk = mptcp_subflow_get_send(msk, &sndbuf);
1466 
1467 			/* do auto tuning */
1468 			if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
1469 			    sndbuf > READ_ONCE(sk->sk_sndbuf))
1470 				WRITE_ONCE(sk->sk_sndbuf, sndbuf);
1471 
1472 			/* try to keep the subflow socket lock across
1473 			 * consecutive xmit on the same socket
1474 			 */
1475 			if (ssk != prev_ssk && prev_ssk)
1476 				mptcp_push_release(sk, prev_ssk, &info);
1477 			if (!ssk)
1478 				goto out;
1479 
1480 			if (ssk != prev_ssk || !prev_ssk)
1481 				lock_sock(ssk);
1482 
1483 			/* keep it simple and always provide a new skb for the
1484 			 * subflow, even if we will not use it when collapsing
1485 			 * on the pending one
1486 			 */
1487 			if (!mptcp_alloc_tx_skb(sk, ssk)) {
1488 				mptcp_push_release(sk, ssk, &info);
1489 				goto out;
1490 			}
1491 
1492 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1493 			if (ret <= 0) {
1494 				mptcp_push_release(sk, ssk, &info);
1495 				goto out;
1496 			}
1497 
1498 			info.sent += ret;
1499 			dfrag->already_sent += ret;
1500 			msk->snd_nxt += ret;
1501 			msk->snd_burst -= ret;
1502 			msk->tx_pending_data -= ret;
1503 			copied += ret;
1504 			len -= ret;
1505 		}
1506 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1507 	}
1508 
1509 	/* at this point we still hold the socket lock for the last subflow we used */
1510 	if (ssk)
1511 		mptcp_push_release(sk, ssk, &info);
1512 
1513 out:
1514 	if (copied) {
1515 		/* start the timer, if it's not pending */
1516 		if (!mptcp_timer_pending(sk))
1517 			mptcp_reset_timer(sk);
1518 		__mptcp_check_send_data_fin(sk);
1519 	}
1520 }
1521 
1522 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
1523 {
1524 	struct mptcp_sock *msk = mptcp_sk(sk);
1525 	struct mptcp_sendmsg_info info;
1526 	struct mptcp_data_frag *dfrag;
1527 	int len, copied = 0;
1528 
1529 	info.flags = 0;
1530 	while ((dfrag = mptcp_send_head(sk))) {
1531 		info.sent = dfrag->already_sent;
1532 		info.limit = dfrag->data_len;
1533 		len = dfrag->data_len - dfrag->already_sent;
1534 		while (len > 0) {
1535 			int ret = 0;
1536 
1537 			/* do auto tuning */
1538 			if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
1539 			    ssk->sk_sndbuf > READ_ONCE(sk->sk_sndbuf))
1540 				WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
1541 
1542 			if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
1543 				__mptcp_update_wmem(sk);
1544 				sk_mem_reclaim_partial(sk);
1545 			}
1546 			if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
1547 				goto out;
1548 
1549 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1550 			if (ret <= 0)
1551 				goto out;
1552 
1553 			info.sent += ret;
1554 			dfrag->already_sent += ret;
1555 			msk->snd_nxt += ret;
1556 			msk->snd_burst -= ret;
1557 			msk->tx_pending_data -= ret;
1558 			copied += ret;
1559 			len -= ret;
1560 		}
1561 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1562 	}
1563 
1564 out:
1565 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
1566 	 * not going to flush it via release_sock()
1567 	 */
1568 	__mptcp_update_wmem(sk);
1569 	if (copied) {
1570 		mptcp_set_timeout(sk, ssk);
1571 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1572 			 info.size_goal);
1573 		if (msk->snd_data_fin_enable &&
1574 		    msk->snd_nxt + 1 == msk->write_seq)
1575 			mptcp_schedule_work(sk);
1576 	}
1577 }
1578 
1579 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1580 {
1581 	struct mptcp_sock *msk = mptcp_sk(sk);
1582 	struct page_frag *pfrag;
1583 	size_t copied = 0;
1584 	int ret = 0;
1585 	long timeo;
1586 
1587 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
1588 		return -EOPNOTSUPP;
1589 
1590 	mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, len));
1591 
1592 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1593 
1594 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1595 		ret = sk_stream_wait_connect(sk, &timeo);
1596 		if (ret)
1597 			goto out;
1598 	}
1599 
1600 	pfrag = sk_page_frag(sk);
1601 
1602 	while (msg_data_left(msg)) {
1603 		int total_ts, frag_truesize = 0;
1604 		struct mptcp_data_frag *dfrag;
1605 		struct sk_buff_head skbs;
1606 		bool dfrag_collapsed;
1607 		size_t psize, offset;
1608 
1609 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
1610 			ret = -EPIPE;
1611 			goto out;
1612 		}
1613 
1614 		/* reuse tail pfrag, if possible, or carve a new one from the
1615 		 * page allocator
1616 		 */
1617 		dfrag = mptcp_pending_tail(sk);
1618 		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1619 		if (!dfrag_collapsed) {
1620 			if (!sk_stream_memory_free(sk))
1621 				goto wait_for_memory;
1622 
1623 			if (!mptcp_page_frag_refill(sk, pfrag))
1624 				goto wait_for_memory;
1625 
1626 			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1627 			frag_truesize = dfrag->overhead;
1628 		}
1629 
1630 		/* we do not bound vs wspace, to allow a single packet.
1631 		 * memory accounting will prevent excessive memory usage
1632 		 * anyway
1633 		 */
1634 		offset = dfrag->offset + dfrag->data_len;
1635 		psize = pfrag->size - offset;
1636 		psize = min_t(size_t, psize, msg_data_left(msg));
1637 		total_ts = psize + frag_truesize;
1638 		__skb_queue_head_init(&skbs);
1639 		if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts))
1640 			goto wait_for_memory;
1641 
1642 		if (!mptcp_wmem_alloc(sk, total_ts)) {
1643 			__skb_queue_purge(&skbs);
1644 			goto wait_for_memory;
1645 		}
1646 
1647 		skb_queue_splice_tail(&skbs, &msk->skb_tx_cache);
1648 		if (copy_page_from_iter(dfrag->page, offset, psize,
1649 					&msg->msg_iter) != psize) {
1650 			mptcp_wmem_uncharge(sk, psize + frag_truesize);
1651 			ret = -EFAULT;
1652 			goto out;
1653 		}
1654 
1655 		/* data successfully copied into the write queue */
1656 		copied += psize;
1657 		dfrag->data_len += psize;
1658 		frag_truesize += psize;
1659 		pfrag->offset += frag_truesize;
1660 		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1661 
1662 		/* charge data on mptcp pending queue to the msk socket
1663 		 * Note: we charge such data both to sk and ssk
1664 		 */
1665 		sk_wmem_queued_add(sk, frag_truesize);
1666 		if (!dfrag_collapsed) {
1667 			get_page(dfrag->page);
1668 			list_add_tail(&dfrag->list, &msk->rtx_queue);
1669 			if (!msk->first_pending)
1670 				WRITE_ONCE(msk->first_pending, dfrag);
1671 		}
1672 		pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
1673 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1674 			 !dfrag_collapsed);
1675 
1676 		continue;
1677 
1678 wait_for_memory:
1679 		set_bit(MPTCP_NOSPACE, &msk->flags);
1680 		mptcp_push_pending(sk, msg->msg_flags);
1681 		ret = sk_stream_wait_memory(sk, &timeo);
1682 		if (ret)
1683 			goto out;
1684 	}
1685 
1686 	if (copied) {
1687 		msk->tx_pending_data += copied;
1688 		mptcp_push_pending(sk, msg->msg_flags);
1689 	}
1690 
1691 out:
1692 	release_sock(sk);
1693 	return copied ? : ret;
1694 }
1695 
1696 static void mptcp_wait_data(struct sock *sk, long *timeo)
1697 {
1698 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1699 	struct mptcp_sock *msk = mptcp_sk(sk);
1700 
1701 	add_wait_queue(sk_sleep(sk), &wait);
1702 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1703 
1704 	sk_wait_event(sk, timeo,
1705 		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
1706 
1707 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1708 	remove_wait_queue(sk_sleep(sk), &wait);
1709 }
1710 
1711 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1712 				struct msghdr *msg,
1713 				size_t len)
1714 {
1715 	struct sk_buff *skb;
1716 	int copied = 0;
1717 
1718 	while ((skb = skb_peek(&msk->receive_queue)) != NULL) {
1719 		u32 offset = MPTCP_SKB_CB(skb)->offset;
1720 		u32 data_len = skb->len - offset;
1721 		u32 count = min_t(size_t, len - copied, data_len);
1722 		int err;
1723 
1724 		err = skb_copy_datagram_msg(skb, offset, msg, count);
1725 		if (unlikely(err < 0)) {
1726 			if (!copied)
1727 				return err;
1728 			break;
1729 		}
1730 
1731 		copied += count;
1732 
1733 		if (count < data_len) {
1734 			MPTCP_SKB_CB(skb)->offset += count;
1735 			break;
1736 		}
1737 
1738 		/* we will bulk release the skb memory later */
1739 		skb->destructor = NULL;
1740 		msk->rmem_released += skb->truesize;
1741 		__skb_unlink(skb, &msk->receive_queue);
1742 		__kfree_skb(skb);
1743 
1744 		if (copied >= len)
1745 			break;
1746 	}
1747 
1748 	return copied;
1749 }
1750 
1751 /* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
1752  *
1753  * Only difference: Use highest rtt estimate of the subflows in use.
1754  */
1755 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1756 {
1757 	struct mptcp_subflow_context *subflow;
1758 	struct sock *sk = (struct sock *)msk;
1759 	u32 time, advmss = 1;
1760 	u64 rtt_us, mstamp;
1761 
1762 	sock_owned_by_me(sk);
1763 
1764 	if (copied <= 0)
1765 		return;
1766 
1767 	msk->rcvq_space.copied += copied;
1768 
1769 	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
1770 	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1771 
1772 	rtt_us = msk->rcvq_space.rtt_us;
1773 	if (rtt_us && time < (rtt_us >> 3))
1774 		return;
1775 
1776 	rtt_us = 0;
1777 	mptcp_for_each_subflow(msk, subflow) {
1778 		const struct tcp_sock *tp;
1779 		u64 sf_rtt_us;
1780 		u32 sf_advmss;
1781 
1782 		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1783 
1784 		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
1785 		sf_advmss = READ_ONCE(tp->advmss);
1786 
1787 		rtt_us = max(sf_rtt_us, rtt_us);
1788 		advmss = max(sf_advmss, advmss);
1789 	}
1790 
1791 	msk->rcvq_space.rtt_us = rtt_us;
1792 	if (time < (rtt_us >> 3) || rtt_us == 0)
1793 		return;
1794 
1795 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1796 		goto new_measure;
1797 
1798 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
1799 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
1800 		int rcvmem, rcvbuf;
1801 		u64 rcvwin, grow;
1802 
1803 		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1804 
1805 		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1806 
1807 		do_div(grow, msk->rcvq_space.space);
1808 		rcvwin += (grow << 1);
1809 
1810 		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
1811 		while (tcp_win_from_space(sk, rcvmem) < advmss)
1812 			rcvmem += 128;
1813 
1814 		do_div(rcvwin, advmss);
1815 		rcvbuf = min_t(u64, rcvwin * rcvmem,
1816 			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
1817 
1818 		if (rcvbuf > sk->sk_rcvbuf) {
1819 			u32 window_clamp;
1820 
1821 			window_clamp = tcp_win_from_space(sk, rcvbuf);
1822 			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
1823 
1824 			/* Make subflows follow along.  If we do not do this, we
1825 			 * get drops at subflow level if skbs can't be moved to
1826 			 * the mptcp rx queue fast enough (announced rcv_win can
1827 			 * exceed ssk->sk_rcvbuf).
1828 			 */
1829 			mptcp_for_each_subflow(msk, subflow) {
1830 				struct sock *ssk;
1831 				bool slow;
1832 
1833 				ssk = mptcp_subflow_tcp_sock(subflow);
1834 				slow = lock_sock_fast(ssk);
1835 				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1836 				tcp_sk(ssk)->window_clamp = window_clamp;
1837 				tcp_cleanup_rbuf(ssk, 1);
1838 				unlock_sock_fast(ssk, slow);
1839 			}
1840 		}
1841 	}
1842 
1843 	msk->rcvq_space.space = msk->rcvq_space.copied;
1844 new_measure:
1845 	msk->rcvq_space.copied = 0;
1846 	msk->rcvq_space.time = mstamp;
1847 }
1848 
1849 static void __mptcp_update_rmem(struct sock *sk)
1850 {
1851 	struct mptcp_sock *msk = mptcp_sk(sk);
1852 
1853 	if (!msk->rmem_released)
1854 		return;
1855 
1856 	atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
1857 	sk_mem_uncharge(sk, msk->rmem_released);
1858 	msk->rmem_released = 0;
1859 }
1860 
1861 static void __mptcp_splice_receive_queue(struct sock *sk)
1862 {
1863 	struct mptcp_sock *msk = mptcp_sk(sk);
1864 
1865 	skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
1866 }
1867 
1868 static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv)
1869 {
1870 	struct sock *sk = (struct sock *)msk;
1871 	unsigned int moved = 0;
1872 	bool ret, done;
1873 
1874 	__mptcp_flush_join_list(msk);
1875 	do {
1876 		struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1877 		bool slowpath;
1878 
1879 		/* we can have data pending in the subflows only if the msk
1880 		 * receive buffer was full at subflow_data_ready() time,
1881 		 * receive buffer was full at subflow_data_ready() time;
1882 		 */
1883 		if (likely(!ssk))
1884 			break;
1885 
1886 		slowpath = lock_sock_fast(ssk);
1887 		mptcp_data_lock(sk);
1888 		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1889 		mptcp_data_unlock(sk);
1890 		if (moved && rcv) {
1891 			WRITE_ONCE(msk->rmem_pending, min(rcv, moved));
1892 			tcp_cleanup_rbuf(ssk, 1);
1893 			WRITE_ONCE(msk->rmem_pending, 0);
1894 		}
1895 		unlock_sock_fast(ssk, slowpath);
1896 	} while (!done);
1897 
1898 	/* acquire the data lock only if some input data is pending */
1899 	ret = moved > 0;
1900 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
1901 	    !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
1902 		mptcp_data_lock(sk);
1903 		__mptcp_update_rmem(sk);
1904 		ret |= __mptcp_ofo_queue(msk);
1905 		__mptcp_splice_receive_queue(sk);
1906 		mptcp_data_unlock(sk);
1907 	}
1908 	if (ret)
1909 		mptcp_check_data_fin((struct sock *)msk);
1910 	return !skb_queue_empty(&msk->receive_queue);
1911 }
1912 
1913 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1914 			 int nonblock, int flags, int *addr_len)
1915 {
1916 	struct mptcp_sock *msk = mptcp_sk(sk);
1917 	int copied = 0;
1918 	int target;
1919 	long timeo;
1920 
1921 	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
1922 		return -EOPNOTSUPP;
1923 
1924 	mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk));
1925 	if (unlikely(sk->sk_state == TCP_LISTEN)) {
1926 		copied = -ENOTCONN;
1927 		goto out_err;
1928 	}
1929 
1930 	timeo = sock_rcvtimeo(sk, nonblock);
1931 
1932 	len = min_t(size_t, len, INT_MAX);
1933 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1934 
1935 	while (copied < len) {
1936 		int bytes_read, old_space;
1937 
1938 		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
1939 		if (unlikely(bytes_read < 0)) {
1940 			if (!copied)
1941 				copied = bytes_read;
1942 			goto out_err;
1943 		}
1944 
1945 		copied += bytes_read;
1946 
1947 		if (skb_queue_empty(&msk->receive_queue) &&
1948 		    __mptcp_move_skbs(msk, len - copied))
1949 			continue;
1950 
1951 		/* be sure to advertise window change */
1952 		old_space = READ_ONCE(msk->old_wspace);
1953 		if ((tcp_space(sk) - old_space) >= old_space)
1954 			mptcp_cleanup_rbuf(msk);
1955 
1956 		/* only the master socket status is relevant here. The exit
1957 		 * conditions mirror closely tcp_recvmsg()
1958 		 */
1959 		if (copied >= target)
1960 			break;
1961 
1962 		if (copied) {
1963 			if (sk->sk_err ||
1964 			    sk->sk_state == TCP_CLOSE ||
1965 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1966 			    !timeo ||
1967 			    signal_pending(current))
1968 				break;
1969 		} else {
1970 			if (sk->sk_err) {
1971 				copied = sock_error(sk);
1972 				break;
1973 			}
1974 
1975 			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1976 				mptcp_check_for_eof(msk);
1977 
1978 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
1979 				/* race breaker: the shutdown could be after the
1980 				 * previous receive queue check
1981 				 */
1982 				if (__mptcp_move_skbs(msk, len - copied))
1983 					continue;
1984 				break;
1985 			}
1986 
1987 			if (sk->sk_state == TCP_CLOSE) {
1988 				copied = -ENOTCONN;
1989 				break;
1990 			}
1991 
1992 			if (!timeo) {
1993 				copied = -EAGAIN;
1994 				break;
1995 			}
1996 
1997 			if (signal_pending(current)) {
1998 				copied = sock_intr_errno(timeo);
1999 				break;
2000 			}
2001 		}
2002 
2003 		pr_debug("block timeout %ld", timeo);
2004 		mptcp_wait_data(sk, &timeo);
2005 	}
2006 
2007 	if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
2008 	    skb_queue_empty(&msk->receive_queue)) {
2009 		/* entire backlog drained, clear DATA_READY. */
2010 		clear_bit(MPTCP_DATA_READY, &msk->flags);
2011 
2012 		/* .. race-breaker: ssk might have gotten new data
2013 		 * after last __mptcp_move_skbs() returned false.
2014 		 */
2015 		if (unlikely(__mptcp_move_skbs(msk, 0)))
2016 			set_bit(MPTCP_DATA_READY, &msk->flags);
2017 	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
2018 		/* data to read but mptcp_wait_data() cleared DATA_READY */
2019 		set_bit(MPTCP_DATA_READY, &msk->flags);
2020 	}
2021 out_err:
2022 	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
2023 		 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
2024 		 skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
2025 	mptcp_rcv_space_adjust(msk, copied);
2026 
2027 	release_sock(sk);
2028 	return copied;
2029 }
2030 
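/* MPTCP-level retransmit event: defer the actual handling to the worker. */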
2031 static void mptcp_retransmit_handler(struct sock *sk)
2032 {
2033 	struct mptcp_sock *msk = mptcp_sk(sk);
2034 
2035 	set_bit(MPTCP_WORK_RTX, &msk->flags);
2036 	mptcp_schedule_work(sk);
2037 }
2038 
2039 static void mptcp_retransmit_timer(struct timer_list *t)
2040 {
2041 	struct inet_connection_sock *icsk = from_timer(icsk, t,
2042 						       icsk_retransmit_timer);
2043 	struct sock *sk = &icsk->icsk_inet.sk;
2044 
2045 	bh_lock_sock(sk);
2046 	if (!sock_owned_by_user(sk)) {
2047 		mptcp_retransmit_handler(sk);
2048 	} else {
2049 		/* delegate our work to tcp_release_cb() */
2050 		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
2051 				      &sk->sk_tsq_flags))
2052 			sock_hold(sk);
2053 	}
2054 	bh_unlock_sock(sk);
2055 	sock_put(sk);
2056 }
2057 
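/* msk-level timeout: just kick the worker. */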
2058 static void mptcp_timeout_timer(struct timer_list *t)
2059 {
2060 	struct sock *sk = from_timer(sk, t, sk_timer);
2061 
2062 	mptcp_schedule_work(sk);
2063 	sock_put(sk);
2064 }
2065 
2066 /* Find an idle subflow.  Return NULL if there is unacked data at tcp
2067  * level.
2068  *
2069  * A backup subflow is returned only if that is the only kind available.
2070  */
2071 static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
2072 {
2073 	struct mptcp_subflow_context *subflow;
2074 	struct sock *backup = NULL;
2075 
2076 	sock_owned_by_me((const struct sock *)msk);
2077 
2078 	if (__mptcp_check_fallback(msk))
2079 		return NULL;
2080 
2081 	mptcp_for_each_subflow(msk, subflow) {
2082 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2083 
2084 		if (!mptcp_subflow_active(subflow))
2085 			continue;
2086 
2087 		/* still data outstanding at TCP level?  Don't retransmit. */
2088 		if (!tcp_write_queue_empty(ssk)) {
2089 			if (inet_csk(ssk)->icsk_ca_state >= TCP_CA_Loss)
2090 				continue;
2091 			return NULL;
2092 		}
2093 
2094 		if (subflow->backup) {
2095 			if (!backup)
2096 				backup = ssk;
2097 			continue;
2098 		}
2099 
2100 		return ssk;
2101 	}
2102 
2103 	return backup;
2104 }
2105 
2106 /* subflow sockets can be either outgoing (connect) or incoming
2107  * (accept).
2108  *
2109  * Outgoing subflows use in-kernel sockets.
2110  * Incoming subflows do not have their own 'struct socket' allocated,
2111  * so we need to use tcp_close() after detaching them from the mptcp
2112  * parent socket.
2113  */
2114 void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2115 		       struct mptcp_subflow_context *subflow)
2116 {
2117 	bool dispose_socket = false;
2118 	struct socket *sock;
2119 
2120 	list_del(&subflow->node);
2121 
2122 	lock_sock(ssk);
2123 
2124 	/* if we are invoked by the msk cleanup code, the subflow is
2125 	 * already orphaned
2126 	 */
2127 	sock = ssk->sk_socket;
2128 	if (sock) {
2129 		dispose_socket = sock != sk->sk_socket;
2130 		sock_orphan(ssk);
2131 	}
2132 
2133 	subflow->disposable = 1;
2134 
2135 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops:
2136 	 * the ssk has already been destroyed, we just need to release the
2137 	 * reference owned by msk
2138 	 */
2139 	if (!inet_csk(ssk)->icsk_ulp_ops) {
2140 		kfree_rcu(subflow, rcu);
2141 	} else {
2142 		/* otherwise tcp will dispose of the ssk and subflow ctx */
2143 		__tcp_close(ssk, 0);
2144 
2145 		/* close acquired an extra ref */
2146 		__sock_put(ssk);
2147 	}
2148 	release_sock(ssk);
2149 	if (dispose_socket)
2150 		iput(SOCK_INODE(sock));
2151 
2152 	sock_put(ssk);
2153 }
2154 
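/* The msk has no MSS of its own; icsk_sync_mss is a stub. */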
2155 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2156 {
2157 	return 0;
2158 }
2159 
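/* Dispatch the pending path manager events to the netlink PM handlers,
 * clearing each status bit under the pm lock.
 */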
2160 static void pm_work(struct mptcp_sock *msk)
2161 {
2162 	struct mptcp_pm_data *pm = &msk->pm;
2163 
2164 	spin_lock_bh(&msk->pm.lock);
2165 
2166 	pr_debug("msk=%p status=%x", msk, pm->status);
2167 	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
2168 		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
2169 		mptcp_pm_nl_add_addr_received(msk);
2170 	}
2171 	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
2172 		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
2173 		mptcp_pm_nl_add_addr_send_ack(msk);
2174 	}
2175 	if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
2176 		pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
2177 		mptcp_pm_nl_rm_addr_received(msk);
2178 	}
2179 	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
2180 		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
2181 		mptcp_pm_nl_fully_established(msk);
2182 	}
2183 	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
2184 		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
2185 		mptcp_pm_nl_subflow_established(msk);
2186 	}
2187 
2188 	spin_unlock_bh(&msk->pm.lock);
2189 }
2190 
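/* Dispose of every subflow that already reached the TCP_CLOSE state. */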
2191 static void __mptcp_close_subflow(struct mptcp_sock *msk)
2192 {
2193 	struct mptcp_subflow_context *subflow, *tmp;
2194 
2195 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2196 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2197 
2198 		if (inet_sk_state_load(ssk) != TCP_CLOSE)
2199 			continue;
2200 
2201 		__mptcp_close_ssk((struct sock *)msk, ssk, subflow);
2202 	}
2203 }
2204 
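/* Return true if the close grace period (TCP_TIMEWAIT_LEN) elapsed or if
 * every subflow already reached the TCP_CLOSE state.
 */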
2205 static bool mptcp_check_close_timeout(const struct sock *sk)
2206 {
2207 	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
2208 	struct mptcp_subflow_context *subflow;
2209 
2210 	if (delta >= TCP_TIMEWAIT_LEN)
2211 		return true;
2212 
2213 	/* if all subflows are in a closed state, don't bother with the additional
2214 	 * timeout
2215 	 */
2216 	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
2217 		if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
2218 		    TCP_CLOSE)
2219 			return false;
2220 	}
2221 	return true;
2222 }
2223 
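/* Handle a received MP_FASTCLOSE: reset every non-closed subflow and move
 * the msk to TCP_CLOSE, waking up any waiter.
 */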
2224 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2225 {
2226 	struct mptcp_subflow_context *subflow, *tmp;
2227 	struct sock *sk = &msk->sk.icsk_inet.sk;
2228 
2229 	if (likely(!READ_ONCE(msk->rcv_fastclose)))
2230 		return;
2231 
2232 	mptcp_token_destroy(msk);
2233 
2234 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2235 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2236 
2237 		lock_sock(tcp_sk);
2238 		if (tcp_sk->sk_state != TCP_CLOSE) {
2239 			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
2240 			tcp_set_state(tcp_sk, TCP_CLOSE);
2241 		}
2242 		release_sock(tcp_sk);
2243 	}
2244 
2245 	inet_sk_state_store(sk, TCP_CLOSE);
2246 	sk->sk_shutdown = SHUTDOWN_MASK;
2247 	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2248 	set_bit(MPTCP_DATA_READY, &msk->flags);
2249 	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2250 
2251 	mptcp_close_wake_up(sk);
2252 }
2253 
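/* msk work queue handler: processes the deferred events (DATA_FIN, fastclose,
 * subflow close, PM updates), performs MPTCP-level retransmissions and
 * disposes of orphaned msks once the close timeout fires.
 */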
2254 static void mptcp_worker(struct work_struct *work)
2255 {
2256 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2257 	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
2258 	struct mptcp_sendmsg_info info = {};
2259 	struct mptcp_data_frag *dfrag;
2260 	size_t copied = 0;
2261 	int state, ret;
2262 
2263 	lock_sock(sk);
2264 	state = sk->sk_state;
2265 	if (unlikely(state == TCP_CLOSE))
2266 		goto unlock;
2267 
2268 	mptcp_check_data_fin_ack(sk);
2269 	__mptcp_flush_join_list(msk);
2270 
2271 	mptcp_check_fastclose(msk);
2272 
2273 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2274 		__mptcp_close_subflow(msk);
2275 
2276 	if (msk->pm.status)
2277 		pm_work(msk);
2278 
2279 	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2280 		mptcp_check_for_eof(msk);
2281 
2282 	__mptcp_check_send_data_fin(sk);
2283 	mptcp_check_data_fin(sk);
2284 
2285 	/* if the msk data is completely acked, or the socket timed out,
2286 	 * there is no point in keeping around an orphaned sk
2287 	 */
2288 	if (sock_flag(sk, SOCK_DEAD) &&
2289 	    (mptcp_check_close_timeout(sk) ||
2290 	    (state != sk->sk_state &&
2291 	    ((1 << inet_sk_state_load(sk)) & (TCPF_CLOSE | TCPF_FIN_WAIT2))))) {
2292 		inet_sk_state_store(sk, TCP_CLOSE);
2293 		__mptcp_destroy_sock(sk);
2294 		goto unlock;
2295 	}
2296 
2297 	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2298 		goto unlock;
2299 
2300 	dfrag = mptcp_rtx_head(sk);
2301 	if (!dfrag)
2302 		goto unlock;
2303 
2304 	ssk = mptcp_subflow_get_retrans(msk);
2305 	if (!ssk)
2306 		goto reset_unlock;
2307 
2308 	lock_sock(ssk);
2309 
2310 	/* limit retransmission to the bytes already sent on some subflows */
2311 	info.sent = 0;
2312 	info.limit = dfrag->already_sent;
2313 	while (info.sent < dfrag->already_sent) {
2314 		if (!mptcp_alloc_tx_skb(sk, ssk))
2315 			break;
2316 
2317 		ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2318 		if (ret <= 0)
2319 			break;
2320 
2321 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2322 		copied += ret;
2323 		info.sent += ret;
2324 	}
2325 	if (copied)
2326 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2327 			 info.size_goal);
2328 
2329 	mptcp_set_timeout(sk, ssk);
2330 	release_sock(ssk);
2331 
2332 reset_unlock:
2333 	if (!mptcp_timer_pending(sk))
2334 		mptcp_reset_timer(sk);
2335 
2336 unlock:
2337 	release_sock(sk);
2338 	sock_put(sk);
2339 }
2340 
2341 static int __mptcp_init_sock(struct sock *sk)
2342 {
2343 	struct mptcp_sock *msk = mptcp_sk(sk);
2344 
2345 	spin_lock_init(&msk->join_list_lock);
2346 
2347 	INIT_LIST_HEAD(&msk->conn_list);
2348 	INIT_LIST_HEAD(&msk->join_list);
2349 	INIT_LIST_HEAD(&msk->rtx_queue);
2350 	INIT_WORK(&msk->work, mptcp_worker);
2351 	__skb_queue_head_init(&msk->receive_queue);
2352 	__skb_queue_head_init(&msk->skb_tx_cache);
2353 	msk->out_of_order_queue = RB_ROOT;
2354 	msk->first_pending = NULL;
2355 	msk->wmem_reserved = 0;
2356 	msk->rmem_released = 0;
2357 	msk->tx_pending_data = 0;
2358 	msk->size_goal_cache = TCP_BASE_MSS;
2359 
2360 	msk->ack_hint = NULL;
2361 	msk->first = NULL;
2362 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
2363 
2364 	mptcp_pm_data_init(msk);
2365 
2366 	/* re-use the csk retrans timer for MPTCP-level retrans */
2367 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
2368 	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
2369 	return 0;
2370 }
2371 
2372 static int mptcp_init_sock(struct sock *sk)
2373 {
2374 	struct net *net = sock_net(sk);
2375 	int ret;
2376 
2377 	ret = __mptcp_init_sock(sk);
2378 	if (ret)
2379 		return ret;
2380 
2381 	if (!mptcp_is_enabled(net))
2382 		return -ENOPROTOOPT;
2383 
2384 	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
2385 		return -ENOMEM;
2386 
2387 	ret = __mptcp_socket_create(mptcp_sk(sk));
2388 	if (ret)
2389 		return ret;
2390 
2391 	sk_sockets_allocated_inc(sk);
2392 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
2393 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
2394 
2395 	return 0;
2396 }
2397 
2398 static void __mptcp_clear_xmit(struct sock *sk)
2399 {
2400 	struct mptcp_sock *msk = mptcp_sk(sk);
2401 	struct mptcp_data_frag *dtmp, *dfrag;
2402 	struct sk_buff *skb;
2403 
2404 	WRITE_ONCE(msk->first_pending, NULL);
2405 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
2406 		dfrag_clear(sk, dfrag);
2407 	while ((skb = __skb_dequeue(&msk->skb_tx_cache)) != NULL) {
2408 		sk->sk_forward_alloc += skb->truesize;
2409 		kfree_skb(skb);
2410 	}
2411 }
2412 
2413 static void mptcp_cancel_work(struct sock *sk)
2414 {
2415 	struct mptcp_sock *msk = mptcp_sk(sk);
2416 
2417 	if (cancel_work_sync(&msk->work))
2418 		__sock_put(sk);
2419 }
2420 
2421 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
2422 {
2423 	lock_sock(ssk);
2424 
2425 	switch (ssk->sk_state) {
2426 	case TCP_LISTEN:
2427 		if (!(how & RCV_SHUTDOWN))
2428 			break;
2429 		fallthrough;
2430 	case TCP_SYN_SENT:
2431 		tcp_disconnect(ssk, O_NONBLOCK);
2432 		break;
2433 	default:
2434 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
2435 			pr_debug("Fallback");
2436 			ssk->sk_shutdown |= how;
2437 			tcp_shutdown(ssk, how);
2438 		} else {
2439 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
2440 			mptcp_set_timeout(sk, ssk);
2441 			tcp_send_ack(ssk);
2442 		}
2443 		break;
2444 	}
2445 
2446 	release_sock(ssk);
2447 }
2448 
2449 static const unsigned char new_state[16] = {
2450 	/* current state:     new state:      action:	*/
2451 	[0 /* (Invalid) */] = TCP_CLOSE,
2452 	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2453 	[TCP_SYN_SENT]      = TCP_CLOSE,
2454 	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2455 	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
2456 	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
2457 	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
2458 	[TCP_CLOSE]         = TCP_CLOSE,
2459 	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
2460 	[TCP_LAST_ACK]      = TCP_LAST_ACK,
2461 	[TCP_LISTEN]        = TCP_CLOSE,
2462 	[TCP_CLOSING]       = TCP_CLOSING,
2463 	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
2464 };
2465 
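/* Move the msk to the next state of the close sequence, mirroring
 * tcp_close_state(); returns non-zero when a DATA_FIN must be sent.
 */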
2466 static int mptcp_close_state(struct sock *sk)
2467 {
2468 	int next = (int)new_state[sk->sk_state];
2469 	int ns = next & TCP_STATE_MASK;
2470 
2471 	inet_sk_state_store(sk, ns);
2472 
2473 	return next & TCP_ACTION_FIN;
2474 }
2475 
2476 static void __mptcp_check_send_data_fin(struct sock *sk)
2477 {
2478 	struct mptcp_subflow_context *subflow;
2479 	struct mptcp_sock *msk = mptcp_sk(sk);
2480 
2481 	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
2482 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
2483 		 msk->snd_nxt, msk->write_seq);
2484 
2485 	/* we still have data to push to the subflows, or we are not really
2486 	 * shutting down yet: skip this
2487 	 */
2488 	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
2489 	    mptcp_send_head(sk))
2490 		return;
2491 
2492 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
2493 
2494 	/* fallback socket will not get data_fin/ack, can move to the next
2495 	 * state now
2496 	 */
2497 	if (__mptcp_check_fallback(msk)) {
2498 		if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
2499 			inet_sk_state_store(sk, TCP_CLOSE);
2500 			mptcp_close_wake_up(sk);
2501 		} else if (sk->sk_state == TCP_FIN_WAIT1) {
2502 			inet_sk_state_store(sk, TCP_FIN_WAIT2);
2503 		}
2504 	}
2505 
2506 	__mptcp_flush_join_list(msk);
2507 	mptcp_for_each_subflow(msk, subflow) {
2508 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2509 
2510 		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
2511 	}
2512 }
2513 
2514 static void __mptcp_wr_shutdown(struct sock *sk)
2515 {
2516 	struct mptcp_sock *msk = mptcp_sk(sk);
2517 
2518 	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
2519 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
2520 		 !!mptcp_send_head(sk));
2521 
2522 	/* will be ignored by fallback sockets */
2523 	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2524 	WRITE_ONCE(msk->snd_data_fin_enable, 1);
2525 
2526 	__mptcp_check_send_data_fin(sk);
2527 }
2528 
2529 static void __mptcp_destroy_sock(struct sock *sk)
2530 {
2531 	struct mptcp_subflow_context *subflow, *tmp;
2532 	struct mptcp_sock *msk = mptcp_sk(sk);
2533 	LIST_HEAD(conn_list);
2534 
2535 	pr_debug("msk=%p", msk);
2536 
2537 	/* be sure to always acquire the join list lock, to sync vs
2538 	 * mptcp_finish_join().
2539 	 */
2540 	spin_lock_bh(&msk->join_list_lock);
2541 	list_splice_tail_init(&msk->join_list, &msk->conn_list);
2542 	spin_unlock_bh(&msk->join_list_lock);
2543 	list_splice_init(&msk->conn_list, &conn_list);
2544 
2545 	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
2546 	sk_stop_timer(sk, &sk->sk_timer);
2547 	msk->pm.status = 0;
2548 
2549 	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
2550 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2551 		__mptcp_close_ssk(sk, ssk, subflow);
2552 	}
2553 
2554 	sk->sk_prot->destroy(sk);
2555 
2556 	WARN_ON_ONCE(msk->wmem_reserved);
2557 	WARN_ON_ONCE(msk->rmem_released);
2558 	sk_stream_kill_queues(sk);
2559 	xfrm_sk_free_policy(sk);
2560 	sk_refcnt_debug_release(sk);
2561 	sock_put(sk);
2562 }
2563 
2564 static void mptcp_close(struct sock *sk, long timeout)
2565 {
2566 	struct mptcp_subflow_context *subflow;
2567 	bool do_cancel_work = false;
2568 
2569 	lock_sock(sk);
2570 	sk->sk_shutdown = SHUTDOWN_MASK;
2571 
2572 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
2573 		inet_sk_state_store(sk, TCP_CLOSE);
2574 		goto cleanup;
2575 	}
2576 
2577 	if (mptcp_close_state(sk))
2578 		__mptcp_wr_shutdown(sk);
2579 
2580 	sk_stream_wait_close(sk, timeout);
2581 
2582 cleanup:
2583 	/* orphan all the subflows */
2584 	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
2585 	list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) {
2586 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2587 		bool slow, dispose_socket;
2588 		struct socket *sock;
2589 
2590 		slow = lock_sock_fast(ssk);
2591 		sock = ssk->sk_socket;
2592 		dispose_socket = sock && sock != sk->sk_socket;
2593 		sock_orphan(ssk);
2594 		unlock_sock_fast(ssk, slow);
2595 
2596 		/* for the outgoing subflows we additionally need to free
2597 		 * the associated socket
2598 		 */
2599 		if (dispose_socket)
2600 			iput(SOCK_INODE(sock));
2601 	}
2602 	sock_orphan(sk);
2603 
2604 	sock_hold(sk);
2605 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
2606 	if (sk->sk_state == TCP_CLOSE) {
2607 		__mptcp_destroy_sock(sk);
2608 		do_cancel_work = true;
2609 	} else {
2610 		sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
2611 	}
2612 	release_sock(sk);
2613 	if (do_cancel_work)
2614 		mptcp_cancel_work(sk);
2615 	sock_put(sk);
2616 }
2617 
2618 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
2619 {
2620 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2621 	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
2622 	struct ipv6_pinfo *msk6 = inet6_sk(msk);
2623 
2624 	msk->sk_v6_daddr = ssk->sk_v6_daddr;
2625 	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
2626 
2627 	if (msk6 && ssk6) {
2628 		msk6->saddr = ssk6->saddr;
2629 		msk6->flow_label = ssk6->flow_label;
2630 	}
2631 #endif
2632 
2633 	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
2634 	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
2635 	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
2636 	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
2637 	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
2638 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
2639 }
2640 
2641 static int mptcp_disconnect(struct sock *sk, int flags)
2642 {
2643 	/* Should never be called.
2644 	 * inet_stream_connect() calls ->disconnect, but that
2645 	 * refers to the subflow socket, not the mptcp one.
2646 	 */
2647 	WARN_ON_ONCE(1);
2648 	return 0;
2649 }
2650 
2651 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
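/* The ipv6_pinfo is the last field of struct mptcp6_sock: compute its
 * address from the msk/sock pointer.
 */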
2652 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
2653 {
2654 	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
2655 
2656 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
2657 }
2658 #endif
2659 
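/* Clone the listener msk for a passively established MPTCP connection,
 * initializing the MPTCP-level sequence numbers from the request socket
 * and the received options.
 */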
2660 struct sock *mptcp_sk_clone(const struct sock *sk,
2661 			    const struct mptcp_options_received *mp_opt,
2662 			    struct request_sock *req)
2663 {
2664 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
2665 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
2666 	struct mptcp_sock *msk;
2667 	u64 ack_seq;
2668 
2669 	if (!nsk)
2670 		return NULL;
2671 
2672 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2673 	if (nsk->sk_family == AF_INET6)
2674 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
2675 #endif
2676 
2677 	__mptcp_init_sock(nsk);
2678 
2679 	msk = mptcp_sk(nsk);
2680 	msk->local_key = subflow_req->local_key;
2681 	msk->token = subflow_req->token;
2682 	msk->subflow = NULL;
2683 	WRITE_ONCE(msk->fully_established, false);
2684 
2685 	msk->write_seq = subflow_req->idsn + 1;
2686 	msk->snd_nxt = msk->write_seq;
2687 	msk->snd_una = msk->write_seq;
2688 	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
2689 
2690 	if (mp_opt->mp_capable) {
2691 		msk->can_ack = true;
2692 		msk->remote_key = mp_opt->sndr_key;
2693 		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
2694 		ack_seq++;
2695 		WRITE_ONCE(msk->ack_seq, ack_seq);
2696 		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
2697 	}
2698 
2699 	sock_reset_flag(nsk, SOCK_RCU_FREE);
2700 	/* will be fully established after successful MPC subflow creation */
2701 	inet_sk_state_store(nsk, TCP_SYN_RECV);
2702 	bh_unlock_sock(nsk);
2703 
2704 	/* keep a single reference */
2705 	__sock_put(nsk);
2706 	return nsk;
2707 }
2708 
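/* Initialize the msk receive space autotuning state from the given subflow. */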
2709 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2710 {
2711 	const struct tcp_sock *tp = tcp_sk(ssk);
2712 
2713 	msk->rcvq_space.copied = 0;
2714 	msk->rcvq_space.rtt_us = 0;
2715 
2716 	msk->rcvq_space.time = tp->tcp_mstamp;
2717 
2718 	/* initial rcv_space offering made to peer */
2719 	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2720 				      TCP_INIT_CWND * tp->advmss);
2721 	if (msk->rcvq_space.space == 0)
2722 		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2723 
2724 	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
2725 }
2726 
2727 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
2728 				 bool kern)
2729 {
2730 	struct mptcp_sock *msk = mptcp_sk(sk);
2731 	struct socket *listener;
2732 	struct sock *newsk;
2733 
2734 	listener = __mptcp_nmpc_socket(msk);
2735 	if (WARN_ON_ONCE(!listener)) {
2736 		*err = -EINVAL;
2737 		return NULL;
2738 	}
2739 
2740 	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
2741 	newsk = inet_csk_accept(listener->sk, flags, err, kern);
2742 	if (!newsk)
2743 		return NULL;
2744 
2745 	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
2746 	if (sk_is_mptcp(newsk)) {
2747 		struct mptcp_subflow_context *subflow;
2748 		struct sock *new_mptcp_sock;
2749 
2750 		subflow = mptcp_subflow_ctx(newsk);
2751 		new_mptcp_sock = subflow->conn;
2752 
2753 		/* is_mptcp should be false if subflow->conn is missing, see
2754 		 * subflow_syn_recv_sock()
2755 		 */
2756 		if (WARN_ON_ONCE(!new_mptcp_sock)) {
2757 			tcp_sk(newsk)->is_mptcp = 0;
2758 			return newsk;
2759 		}
2760 
2761 		/* acquire the 2nd reference for the owning socket */
2762 		sock_hold(new_mptcp_sock);
2763 		newsk = new_mptcp_sock;
2764 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
2765 	} else {
2766 		MPTCP_INC_STATS(sock_net(sk),
2767 				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
2768 	}
2769 
2770 	return newsk;
2771 }
2772 
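/* Release the MPTCP-level state owned by the msk: pending tx data, receive
 * queues, token and PM announce list.
 */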
2773 void mptcp_destroy_common(struct mptcp_sock *msk)
2774 {
2775 	struct sock *sk = (struct sock *)msk;
2776 
2777 	__mptcp_clear_xmit(sk);
2778 
2779 	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
2780 	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
2781 
2782 	skb_rbtree_purge(&msk->out_of_order_queue);
2783 	mptcp_token_destroy(msk);
2784 	mptcp_pm_free_anno_list(msk);
2785 }
2786 
2787 static void mptcp_destroy(struct sock *sk)
2788 {
2789 	struct mptcp_sock *msk = mptcp_sk(sk);
2790 
2791 	mptcp_destroy_common(msk);
2792 	sk_sockets_allocated_dec(sk);
2793 }
2794 
2795 static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
2796 				       sockptr_t optval, unsigned int optlen)
2797 {
2798 	struct sock *sk = (struct sock *)msk;
2799 	struct socket *ssock;
2800 	int ret;
2801 
2802 	switch (optname) {
2803 	case SO_REUSEPORT:
2804 	case SO_REUSEADDR:
2805 		lock_sock(sk);
2806 		ssock = __mptcp_nmpc_socket(msk);
2807 		if (!ssock) {
2808 			release_sock(sk);
2809 			return -EINVAL;
2810 		}
2811 
2812 		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
2813 		if (ret == 0) {
2814 			if (optname == SO_REUSEPORT)
2815 				sk->sk_reuseport = ssock->sk->sk_reuseport;
2816 			else if (optname == SO_REUSEADDR)
2817 				sk->sk_reuse = ssock->sk->sk_reuse;
2818 		}
2819 		release_sock(sk);
2820 		return ret;
2821 	}
2822 
2823 	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
2824 }
2825 
2826 static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
2827 			       sockptr_t optval, unsigned int optlen)
2828 {
2829 	struct sock *sk = (struct sock *)msk;
2830 	int ret = -EOPNOTSUPP;
2831 	struct socket *ssock;
2832 
2833 	switch (optname) {
2834 	case IPV6_V6ONLY:
2835 		lock_sock(sk);
2836 		ssock = __mptcp_nmpc_socket(msk);
2837 		if (!ssock) {
2838 			release_sock(sk);
2839 			return -EINVAL;
2840 		}
2841 
2842 		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
2843 		if (ret == 0)
2844 			sk->sk_ipv6only = ssock->sk->sk_ipv6only;
2845 
2846 		release_sock(sk);
2847 		break;
2848 	}
2849 
2850 	return ret;
2851 }
2852 
2853 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
2854 			    sockptr_t optval, unsigned int optlen)
2855 {
2856 	struct mptcp_sock *msk = mptcp_sk(sk);
2857 	struct sock *ssk;
2858 
2859 	pr_debug("msk=%p", msk);
2860 
2861 	if (level == SOL_SOCKET)
2862 		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
2863 
2864 	/* @@ the meaning of setsockopt() when the socket is connected and
2865 	 * there are multiple subflows is not yet defined. It is up to the
2866 	 * MPTCP-level socket to configure the subflows until the subflow
2867 	 * is in TCP fallback, when TCP socket options are passed through
2868 	 * to the one remaining subflow.
2869 	 */
2870 	lock_sock(sk);
2871 	ssk = __mptcp_tcp_fallback(msk);
2872 	release_sock(sk);
2873 	if (ssk)
2874 		return tcp_setsockopt(ssk, level, optname, optval, optlen);
2875 
2876 	if (level == SOL_IPV6)
2877 		return mptcp_setsockopt_v6(msk, optname, optval, optlen);
2878 
2879 	return -EOPNOTSUPP;
2880 }
2881 
2882 static int mptcp_getsockopt(struct sock *sk, int level, int optname,
2883 			    char __user *optval, int __user *option)
2884 {
2885 	struct mptcp_sock *msk = mptcp_sk(sk);
2886 	struct sock *ssk;
2887 
2888 	pr_debug("msk=%p", msk);
2889 
2890 	/* @@ the meaning of getsockopt() when the socket is connected and
2891 	 * there are multiple subflows is not yet defined. It is up to the
2892 	 * MPTCP-level socket to configure the subflows until the subflow
2893 	 * is in TCP fallback, when socket options are passed through
2894 	 * to the one remaining subflow.
2895 	 */
2896 	lock_sock(sk);
2897 	ssk = __mptcp_tcp_fallback(msk);
2898 	release_sock(sk);
2899 	if (ssk)
2900 		return tcp_getsockopt(ssk, level, optname, optval, option);
2901 
2902 	return -EOPNOTSUPP;
2903 }
2904 
2905 void __mptcp_data_acked(struct sock *sk)
2906 {
2907 	if (!sock_owned_by_user(sk))
2908 		__mptcp_clean_una(sk);
2909 	else
2910 		set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);
2911 
2912 	if (mptcp_pending_data_fin_ack(sk))
2913 		mptcp_schedule_work(sk);
2914 }
2915 
2916 void __mptcp_wnd_updated(struct sock *sk, struct sock *ssk)
2917 {
2918 	if (!mptcp_send_head(sk))
2919 		return;
2920 
2921 	if (!sock_owned_by_user(sk))
2922 		__mptcp_subflow_push_pending(sk, ssk);
2923 	else
2924 		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
2925 }
2926 
2927 #define MPTCP_DEFERRED_ALL (TCPF_WRITE_TIMER_DEFERRED)
2928 
2929 /* process deferred events and flush wmem */
2930 static void mptcp_release_cb(struct sock *sk)
2931 {
2932 	unsigned long flags, nflags;
2933 
2934 	/* push_pending may touch wmem_reserved, do it before the later
2935 	 * cleanup
2936 	 */
2937 	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
2938 		__mptcp_clean_una(sk);
2939 	if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) {
2940 		/* mptcp_push_pending() acquires the subflow socket lock
2941 		 *
2942 		 * 1) can't be invoked in atomic scope
2943 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
2944 		 *    datapath acquires the msk socket spinlock while helding
2945 	 *    datapath acquires the msk socket spinlock while holding
2946 		 */
2947 
2948 		spin_unlock_bh(&sk->sk_lock.slock);
2949 		mptcp_push_pending(sk, 0);
2950 		spin_lock_bh(&sk->sk_lock.slock);
2951 	}
2952 
2953 	/* clear any wmem reservation and errors */
2954 	__mptcp_update_wmem(sk);
2955 	__mptcp_update_rmem(sk);
2956 
2957 	do {
2958 		flags = sk->sk_tsq_flags;
2959 		if (!(flags & MPTCP_DEFERRED_ALL))
2960 			return;
2961 		nflags = flags & ~MPTCP_DEFERRED_ALL;
2962 	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
2963 
2964 	sock_release_ownership(sk);
2965 
2966 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
2967 		mptcp_retransmit_handler(sk);
2968 		__sock_put(sk);
2969 	}
2970 }
2971 
2972 static int mptcp_hash(struct sock *sk)
2973 {
2974 	/* should never be called:
2975 	 * we hash the TCP subflows, not the master socket
2976 	 */
2977 	WARN_ON_ONCE(1);
2978 	return 0;
2979 }
2980 
2981 static void mptcp_unhash(struct sock *sk)
2982 {
2983 	/* called from sk_common_release(), but nothing to do here */
2984 }
2985 
2986 static int mptcp_get_port(struct sock *sk, unsigned short snum)
2987 {
2988 	struct mptcp_sock *msk = mptcp_sk(sk);
2989 	struct socket *ssock;
2990 
2991 	ssock = __mptcp_nmpc_socket(msk);
2992 	pr_debug("msk=%p, subflow=%p", msk, ssock);
2993 	if (WARN_ON_ONCE(!ssock))
2994 		return -EINVAL;
2995 
2996 	return inet_csk_get_port(ssock->sk, snum);
2997 }
2998 
2999 void mptcp_finish_connect(struct sock *ssk)
3000 {
3001 	struct mptcp_subflow_context *subflow;
3002 	struct mptcp_sock *msk;
3003 	struct sock *sk;
3004 	u64 ack_seq;
3005 
3006 	subflow = mptcp_subflow_ctx(ssk);
3007 	sk = subflow->conn;
3008 	msk = mptcp_sk(sk);
3009 
3010 	pr_debug("msk=%p, token=%u", sk, subflow->token);
3011 
3012 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
3013 	ack_seq++;
3014 	subflow->map_seq = ack_seq;
3015 	subflow->map_subflow_seq = 1;
3016 
3017 	/* the socket is not connected yet, no msk/subflow ops can access or
3018 	 * race on the fields below
3019 	 */
3020 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
3021 	WRITE_ONCE(msk->local_key, subflow->local_key);
3022 	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
3023 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3024 	WRITE_ONCE(msk->ack_seq, ack_seq);
3025 	WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
3026 	WRITE_ONCE(msk->can_ack, 1);
3027 	WRITE_ONCE(msk->snd_una, msk->write_seq);
3028 
3029 	mptcp_pm_new_connection(msk, 0);
3030 
3031 	mptcp_rcv_space_init(msk, ssk);
3032 }
3033 
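/* Attach the given (subflow) socket to the parent struct socket, so that
 * wakeups and ownership follow the msk socket.
 */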
3034 static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3035 {
3036 	write_lock_bh(&sk->sk_callback_lock);
3037 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
3038 	sk_set_socket(sk, parent);
3039 	sk->sk_uid = SOCK_INODE(parent)->i_uid;
3040 	write_unlock_bh(&sk->sk_callback_lock);
3041 }
3042 
3043 bool mptcp_finish_join(struct sock *ssk)
3044 {
3045 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3046 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3047 	struct sock *parent = (void *)msk;
3048 	struct socket *parent_sock;
3049 	bool ret;
3050 
3051 	pr_debug("msk=%p, subflow=%p", msk, subflow);
3052 
3053 	/* mptcp socket already closing? */
3054 	if (!mptcp_is_fully_established(parent))
3055 		return false;
3056 
3057 	if (!msk->pm.server_side)
3058 		return true;
3059 
3060 	if (!mptcp_pm_allow_new_subflow(msk))
3061 		return false;
3062 
3063 	/* active connections are already on conn_list, and we can't acquire
3064 	 * msk lock here.
3065 	 * use the join list lock as synchronization point and double-check
3066 	 * msk status to avoid racing with __mptcp_destroy_sock()
3067 	 */
3068 	spin_lock_bh(&msk->join_list_lock);
3069 	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
3070 	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) {
3071 		list_add_tail(&subflow->node, &msk->join_list);
3072 		sock_hold(ssk);
3073 	}
3074 	spin_unlock_bh(&msk->join_list_lock);
3075 	if (!ret)
3076 		return false;
3077 
3078 	/* attach to msk socket only after we are sure it will deal with us
3079 	 * at close time
3080 	 */
3081 	parent_sock = READ_ONCE(parent->sk_socket);
3082 	if (parent_sock && !ssk->sk_socket)
3083 		mptcp_sock_graft(ssk, parent_sock);
3084 	subflow->map_seq = READ_ONCE(msk->ack_seq);
3085 	return true;
3086 }
3087 
3088 static struct proto mptcp_prot = {
3089 	.name		= "MPTCP",
3090 	.owner		= THIS_MODULE,
3091 	.init		= mptcp_init_sock,
3092 	.disconnect	= mptcp_disconnect,
3093 	.close		= mptcp_close,
3094 	.accept		= mptcp_accept,
3095 	.setsockopt	= mptcp_setsockopt,
3096 	.getsockopt	= mptcp_getsockopt,
3097 	.shutdown	= tcp_shutdown,
3098 	.destroy	= mptcp_destroy,
3099 	.sendmsg	= mptcp_sendmsg,
3100 	.recvmsg	= mptcp_recvmsg,
3101 	.release_cb	= mptcp_release_cb,
3102 	.hash		= mptcp_hash,
3103 	.unhash		= mptcp_unhash,
3104 	.get_port	= mptcp_get_port,
3105 	.sockets_allocated	= &mptcp_sockets_allocated,
3106 	.memory_allocated	= &tcp_memory_allocated,
3107 	.memory_pressure	= &tcp_memory_pressure,
3108 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
3109 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
3110 	.sysctl_mem	= sysctl_tcp_mem,
3111 	.obj_size	= sizeof(struct mptcp_sock),
3112 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
3113 	.no_autobind	= true,
3114 };
3115 
3116 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3117 {
3118 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3119 	struct socket *ssock;
3120 	int err;
3121 
3122 	lock_sock(sock->sk);
3123 	ssock = __mptcp_nmpc_socket(msk);
3124 	if (!ssock) {
3125 		err = -EINVAL;
3126 		goto unlock;
3127 	}
3128 
3129 	err = ssock->ops->bind(ssock, uaddr, addr_len);
3130 	if (!err)
3131 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3132 
3133 unlock:
3134 	release_sock(sock->sk);
3135 	return err;
3136 }
3137 
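/* Give up on MPTCP for this connection before the handshake is attempted. */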
3138 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
3139 					 struct mptcp_subflow_context *subflow)
3140 {
3141 	subflow->request_mptcp = 0;
3142 	__mptcp_do_fallback(msk);
3143 }
3144 
3145 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
3146 				int addr_len, int flags)
3147 {
3148 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3149 	struct mptcp_subflow_context *subflow;
3150 	struct socket *ssock;
3151 	int err;
3152 
3153 	lock_sock(sock->sk);
3154 	if (sock->state != SS_UNCONNECTED && msk->subflow) {
3155 		/* pending connection or invalid state, let existing subflow
3156 		 * cope with that
3157 		 */
3158 		ssock = msk->subflow;
3159 		goto do_connect;
3160 	}
3161 
3162 	ssock = __mptcp_nmpc_socket(msk);
3163 	if (!ssock) {
3164 		err = -EINVAL;
3165 		goto unlock;
3166 	}
3167 
3168 	mptcp_token_destroy(msk);
3169 	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
3170 	subflow = mptcp_subflow_ctx(ssock->sk);
3171 #ifdef CONFIG_TCP_MD5SIG
3172 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3173 	 * TCP option space.
3174 	 */
3175 	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
3176 		mptcp_subflow_early_fallback(msk, subflow);
3177 #endif
3178 	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
3179 		mptcp_subflow_early_fallback(msk, subflow);
3180 
3181 do_connect:
3182 	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
3183 	sock->state = ssock->state;
3184 
3185 	/* on successful connect, the msk state will be moved to established by
3186 	 * subflow_finish_connect()
3187 	 */
3188 	if (!err || err == -EINPROGRESS)
3189 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3190 	else
3191 		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3192 
3193 unlock:
3194 	release_sock(sock->sk);
3195 	return err;
3196 }
3197 
3198 static int mptcp_listen(struct socket *sock, int backlog)
3199 {
3200 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3201 	struct socket *ssock;
3202 	int err;
3203 
3204 	pr_debug("msk=%p", msk);
3205 
3206 	lock_sock(sock->sk);
3207 	ssock = __mptcp_nmpc_socket(msk);
3208 	if (!ssock) {
3209 		err = -EINVAL;
3210 		goto unlock;
3211 	}
3212 
3213 	mptcp_token_destroy(msk);
3214 	inet_sk_state_store(sock->sk, TCP_LISTEN);
3215 	sock_set_flag(sock->sk, SOCK_RCU_FREE);
3216 
3217 	err = ssock->ops->listen(ssock, backlog);
3218 	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3219 	if (!err)
3220 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3221 
3222 unlock:
3223 	release_sock(sock->sk);
3224 	return err;
3225 }
3226 
3227 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
3228 			       int flags, bool kern)
3229 {
3230 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3231 	struct socket *ssock;
3232 	int err;
3233 
3234 	pr_debug("msk=%p", msk);
3235 
3236 	lock_sock(sock->sk);
3237 	if (sock->sk->sk_state != TCP_LISTEN)
3238 		goto unlock_fail;
3239 
3240 	ssock = __mptcp_nmpc_socket(msk);
3241 	if (!ssock)
3242 		goto unlock_fail;
3243 
3244 	clear_bit(MPTCP_DATA_READY, &msk->flags);
3245 	sock_hold(ssock->sk);
3246 	release_sock(sock->sk);
3247 
3248 	err = ssock->ops->accept(sock, newsock, flags, kern);
3249 	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
3250 		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
3251 		struct mptcp_subflow_context *subflow;
3252 		struct sock *newsk = newsock->sk;
3253 		bool slowpath;
3254 
3255 		slowpath = lock_sock_fast(newsk);
3256 
3257 		/* PM/worker can now acquire the first subflow socket
3258 		 * lock without racing with the listener queue cleanup;
3259 		 * we can notify it, if needed.
3260 		 */
3261 		subflow = mptcp_subflow_ctx(msk->first);
3262 		list_add(&subflow->node, &msk->conn_list);
3263 		sock_hold(msk->first);
3264 		if (mptcp_is_fully_established(newsk))
3265 			mptcp_pm_fully_established(msk);
3266 
3267 		mptcp_copy_inaddrs(newsk, msk->first);
3268 		mptcp_rcv_space_init(msk, msk->first);
3269 
3270 		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
3271 		 * This is needed so NOSPACE flag can be set from tcp stack.
3272 		 */
3273 		__mptcp_flush_join_list(msk);
3274 		mptcp_for_each_subflow(msk, subflow) {
3275 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3276 
3277 			if (!ssk->sk_socket)
3278 				mptcp_sock_graft(ssk, newsock);
3279 		}
3280 		unlock_sock_fast(newsk, slowpath);
3281 	}
3282 
3283 	if (inet_csk_listen_poll(ssock->sk))
3284 		set_bit(MPTCP_DATA_READY, &msk->flags);
3285 	sock_put(ssock->sk);
3286 	return err;
3287 
3288 unlock_fail:
3289 	release_sock(sock->sk);
3290 	return -EINVAL;
3291 }
3292 
3293 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
3294 {
3295 	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
3296 	       0;
3297 }
3298 
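/* EPOLLOUT reporting: if no space is available, set MPTCP_NOSPACE and
 * re-check, so that a concurrent write space wakeup cannot be lost.
 */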
3299 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
3300 {
3301 	struct sock *sk = (struct sock *)msk;
3302 
3303 	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
3304 		return 0;
3305 
3306 	if (sk_stream_is_writeable(sk))
3307 		return EPOLLOUT | EPOLLWRNORM;
3308 
3309 	set_bit(MPTCP_NOSPACE, &msk->flags);
3310 	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
3311 	if (sk_stream_is_writeable(sk))
3312 		return EPOLLOUT | EPOLLWRNORM;
3313 
3314 	return 0;
3315 }
3316 
3317 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
3318 			   struct poll_table_struct *wait)
3319 {
3320 	struct sock *sk = sock->sk;
3321 	struct mptcp_sock *msk;
3322 	__poll_t mask = 0;
3323 	int state;
3324 
3325 	msk = mptcp_sk(sk);
3326 	sock_poll_wait(file, sock, wait);
3327 
3328 	state = inet_sk_state_load(sk);
3329 	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
3330 	if (state == TCP_LISTEN)
3331 		return mptcp_check_readable(msk);
3332 
3333 	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
3334 		mask |= mptcp_check_readable(msk);
3335 		mask |= mptcp_check_writeable(msk);
3336 	}
3337 	if (sk->sk_shutdown & RCV_SHUTDOWN)
3338 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
3339 
3340 	return mask;
3341 }
3342 
3343 static int mptcp_shutdown(struct socket *sock, int how)
3344 {
3345 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3346 	struct sock *sk = sock->sk;
3347 	int ret = 0;
3348 
3349 	pr_debug("msk=%p, how=%d", msk, how);
3350 
3351 	lock_sock(sk);
3352 
3353 	how++;
3354 	if ((how & ~SHUTDOWN_MASK) || !how) {
3355 		ret = -EINVAL;
3356 		goto out_unlock;
3357 	}
3358 
3359 	if (sock->state == SS_CONNECTING) {
3360 		if ((1 << sk->sk_state) &
3361 		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
3362 			sock->state = SS_DISCONNECTING;
3363 		else
3364 			sock->state = SS_CONNECTED;
3365 	}
3366 
3367 	sk->sk_shutdown |= how;
3368 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3369 		__mptcp_wr_shutdown(sk);
3370 
3371 	/* Wake up anyone sleeping in poll. */
3372 	sk->sk_state_change(sk);
3373 
3374 out_unlock:
3375 	release_sock(sk);
3376 
3377 	return ret;
3378 }
3379 
3380 static const struct proto_ops mptcp_stream_ops = {
3381 	.family		   = PF_INET,
3382 	.owner		   = THIS_MODULE,
3383 	.release	   = inet_release,
3384 	.bind		   = mptcp_bind,
3385 	.connect	   = mptcp_stream_connect,
3386 	.socketpair	   = sock_no_socketpair,
3387 	.accept		   = mptcp_stream_accept,
3388 	.getname	   = inet_getname,
3389 	.poll		   = mptcp_poll,
3390 	.ioctl		   = inet_ioctl,
3391 	.gettstamp	   = sock_gettstamp,
3392 	.listen		   = mptcp_listen,
3393 	.shutdown	   = mptcp_shutdown,
3394 	.setsockopt	   = sock_common_setsockopt,
3395 	.getsockopt	   = sock_common_getsockopt,
3396 	.sendmsg	   = inet_sendmsg,
3397 	.recvmsg	   = inet_recvmsg,
3398 	.mmap		   = sock_no_mmap,
3399 	.sendpage	   = inet_sendpage,
3400 };
3401 
3402 static struct inet_protosw mptcp_protosw = {
3403 	.type		= SOCK_STREAM,
3404 	.protocol	= IPPROTO_MPTCP,
3405 	.prot		= &mptcp_prot,
3406 	.ops		= &mptcp_stream_ops,
3407 	.flags		= INET_PROTOSW_ICSK,
3408 };
3409 
3410 void __init mptcp_proto_init(void)
3411 {
3412 	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
3413 
3414 	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
3415 		panic("Failed to allocate MPTCP pcpu counter\n");
3416 
3417 	mptcp_subflow_init();
3418 	mptcp_pm_init();
3419 	mptcp_token_init();
3420 
3421 	if (proto_register(&mptcp_prot, 1) != 0)
3422 		panic("Failed to register MPTCP proto.\n");
3423 
3424 	inet_register_protosw(&mptcp_protosw);
3425 
3426 	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
3427 }
3428 
3429 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3430 static const struct proto_ops mptcp_v6_stream_ops = {
3431 	.family		   = PF_INET6,
3432 	.owner		   = THIS_MODULE,
3433 	.release	   = inet6_release,
3434 	.bind		   = mptcp_bind,
3435 	.connect	   = mptcp_stream_connect,
3436 	.socketpair	   = sock_no_socketpair,
3437 	.accept		   = mptcp_stream_accept,
3438 	.getname	   = inet6_getname,
3439 	.poll		   = mptcp_poll,
3440 	.ioctl		   = inet6_ioctl,
3441 	.gettstamp	   = sock_gettstamp,
3442 	.listen		   = mptcp_listen,
3443 	.shutdown	   = mptcp_shutdown,
3444 	.setsockopt	   = sock_common_setsockopt,
3445 	.getsockopt	   = sock_common_getsockopt,
3446 	.sendmsg	   = inet6_sendmsg,
3447 	.recvmsg	   = inet6_recvmsg,
3448 	.mmap		   = sock_no_mmap,
3449 	.sendpage	   = inet_sendpage,
3450 #ifdef CONFIG_COMPAT
3451 	.compat_ioctl	   = inet6_compat_ioctl,
3452 #endif
3453 };
3454 
3455 static struct proto mptcp_v6_prot;
3456 
3457 static void mptcp_v6_destroy(struct sock *sk)
3458 {
3459 	mptcp_destroy(sk);
3460 	inet6_destroy_sock(sk);
3461 }
3462 
3463 static struct inet_protosw mptcp_v6_protosw = {
3464 	.type		= SOCK_STREAM,
3465 	.protocol	= IPPROTO_MPTCP,
3466 	.prot		= &mptcp_v6_prot,
3467 	.ops		= &mptcp_v6_stream_ops,
3468 	.flags		= INET_PROTOSW_ICSK,
3469 };
3470 
3471 int __init mptcp_proto_v6_init(void)
3472 {
3473 	int err;
3474 
3475 	mptcp_v6_prot = mptcp_prot;
3476 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
3477 	mptcp_v6_prot.slab = NULL;
3478 	mptcp_v6_prot.destroy = mptcp_v6_destroy;
3479 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
3480 
3481 	err = proto_register(&mptcp_v6_prot, 1);
3482 	if (err)
3483 		return err;
3484 
3485 	err = inet6_register_protosw(&mptcp_v6_protosw);
3486 	if (err)
3487 		proto_unregister(&mptcp_v6_prot);
3488 
3489 	return err;
3490 }
3491 #endif
3492