xref: /openbmc/linux/net/mptcp/protocol.c (revision bcf3cf93f64597fd3ccdcf79000f064b0c7dc943)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #include <net/tcp_states.h>
20 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
21 #include <net/transp_v6.h>
22 #endif
23 #include <net/mptcp.h>
24 #include <net/xfrm.h>
25 #include <asm/ioctls.h>
26 #include "protocol.h"
27 #include "mib.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include <trace/events/mptcp.h>
31 
32 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
33 struct mptcp6_sock {
34 	struct mptcp_sock msk;
35 	struct ipv6_pinfo np;
36 };
37 #endif
38 
39 struct mptcp_skb_cb {
40 	u64 map_seq;
41 	u64 end_seq;
42 	u32 offset;
43 	u8  has_rxtstamp:1;
44 };
45 
46 #define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
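
/* Illustrative sketch, not part of the original file: the skb cb[]
 * scratch area carries the 64-bit DSS mapping while an skb sits in the
 * MPTCP-level queues. E.g. the amount of mapped data not yet read is:
 *
 *	static inline u64 mptcp_skb_unread(const struct sk_buff *skb)
 *	{
 *		return MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
 *	}
 *
 * (hypothetical helper; __mptcp_recvmsg_mskq() keeps this invariant by
 * advancing both offset and map_seq after a partial copy to user space)
 */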
47 
48 enum {
49 	MPTCP_CMSG_TS = BIT(0),
50 	MPTCP_CMSG_INQ = BIT(1),
51 };
52 
53 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
54 
55 static void __mptcp_destroy_sock(struct sock *sk);
56 static void __mptcp_check_send_data_fin(struct sock *sk);
57 
58 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
59 static struct net_device mptcp_napi_dev;
60 
61 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
62  * completed yet or has failed, return the subflow socket.
63  * Otherwise return NULL.
64  */
65 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
66 {
67 	if (!msk->subflow || READ_ONCE(msk->can_ack))
68 		return NULL;
69 
70 	return msk->subflow;
71 }
72 
73 /* Returns end sequence number of the receiver's advertised window */
74 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
75 {
76 	return READ_ONCE(msk->wnd_end);
77 }
78 
79 static bool mptcp_is_tcpsk(struct sock *sk)
80 {
81 	struct socket *sock = sk->sk_socket;
82 
83 	if (unlikely(sk->sk_prot == &tcp_prot)) {
84 		/* we are being invoked after mptcp_accept() has
85 		 * accepted a non-mp-capable flow: sk is a tcp_sk,
86 		 * not an mptcp one.
87 		 *
88 		 * Hand the socket over to tcp so all further socket ops
89 		 * bypass mptcp.
90 		 */
91 		sock->ops = &inet_stream_ops;
92 		return true;
93 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
94 	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
95 		sock->ops = &inet6_stream_ops;
96 		return true;
97 #endif
98 	}
99 
100 	return false;
101 }
102 
103 static int __mptcp_socket_create(struct mptcp_sock *msk)
104 {
105 	struct mptcp_subflow_context *subflow;
106 	struct sock *sk = (struct sock *)msk;
107 	struct socket *ssock;
108 	int err;
109 
110 	err = mptcp_subflow_create_socket(sk, &ssock);
111 	if (err)
112 		return err;
113 
114 	msk->first = ssock->sk;
115 	msk->subflow = ssock;
116 	subflow = mptcp_subflow_ctx(ssock->sk);
117 	list_add(&subflow->node, &msk->conn_list);
118 	sock_hold(ssock->sk);
119 	subflow->request_mptcp = 1;
120 
121 	/* This is the first subflow, always with id 0 */
122 	subflow->local_id_valid = 1;
123 	mptcp_sock_graft(msk->first, sk->sk_socket);
124 
125 	return 0;
126 }
127 
128 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
129 {
130 	sk_drops_add(sk, skb);
131 	__kfree_skb(skb);
132 }
133 
134 static void mptcp_rmem_charge(struct sock *sk, int size)
135 {
136 	mptcp_sk(sk)->rmem_fwd_alloc -= size;
137 }
138 
139 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
140 			       struct sk_buff *from)
141 {
142 	bool fragstolen;
143 	int delta;
144 
145 	if (MPTCP_SKB_CB(from)->offset ||
146 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
147 		return false;
148 
149 	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
150 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
151 		 to->len, MPTCP_SKB_CB(from)->end_seq);
152 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
153 	kfree_skb_partial(from, fragstolen);
154 	atomic_add(delta, &sk->sk_rmem_alloc);
155 	mptcp_rmem_charge(sk, delta);
156 	return true;
157 }
158 
159 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
160 				   struct sk_buff *from)
161 {
162 	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
163 		return false;
164 
165 	return mptcp_try_coalesce((struct sock *)msk, to, from);
166 }
167 
168 static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
169 {
170 	amount >>= SK_MEM_QUANTUM_SHIFT;
171 	mptcp_sk(sk)->rmem_fwd_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
172 	__sk_mem_reduce_allocated(sk, amount);
173 }
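
/* Worked example (illustrative): with 4 KiB memory quanta,
 * __mptcp_rmem_reclaim(sk, 10000) computes amount = 10000 >> 12 = 2,
 * subtracts 2 << 12 = 8192 bytes from rmem_fwd_alloc and returns two
 * quanta to the global accounting via __sk_mem_reduce_allocated(); the
 * remaining 1808 bytes stay forward-allocated, as only whole quanta can
 * be reclaimed.
 */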
174 
175 static void mptcp_rmem_uncharge(struct sock *sk, int size)
176 {
177 	struct mptcp_sock *msk = mptcp_sk(sk);
178 	int reclaimable;
179 
180 	msk->rmem_fwd_alloc += size;
181 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
182 
183 	/* see sk_mem_uncharge() for the rationale behind the following schema */
184 	if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
185 		__mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
186 }
187 
188 static void mptcp_rfree(struct sk_buff *skb)
189 {
190 	unsigned int len = skb->truesize;
191 	struct sock *sk = skb->sk;
192 
193 	atomic_sub(len, &sk->sk_rmem_alloc);
194 	mptcp_rmem_uncharge(sk, len);
195 }
196 
197 static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
198 {
199 	skb_orphan(skb);
200 	skb->sk = sk;
201 	skb->destructor = mptcp_rfree;
202 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
203 	mptcp_rmem_charge(sk, skb->truesize);
204 }
205 
206 /* "inspired" by tcp_data_queue_ofo(), main differences:
207  * - use mptcp seqs
208  * - don't cope with sacks
209  */
210 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
211 {
212 	struct sock *sk = (struct sock *)msk;
213 	struct rb_node **p, *parent;
214 	u64 seq, end_seq, max_seq;
215 	struct sk_buff *skb1;
216 
217 	seq = MPTCP_SKB_CB(skb)->map_seq;
218 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
219 	max_seq = READ_ONCE(msk->rcv_wnd_sent);
220 
221 	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
222 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
223 	if (after64(end_seq, max_seq)) {
224 		/* out of window */
225 		mptcp_drop(sk, skb);
226 		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
227 			 (unsigned long long)end_seq - (unsigned long long)max_seq,
228 			 (unsigned long long)msk->rcv_wnd_sent);
229 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
230 		return;
231 	}
232 
233 	p = &msk->out_of_order_queue.rb_node;
234 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
235 	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
236 		rb_link_node(&skb->rbnode, NULL, p);
237 		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
238 		msk->ooo_last_skb = skb;
239 		goto end;
240 	}
241 
242 	/* with 2 subflows, adding at the end of the ooo queue is quite likely.
243 	 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
244 	 */
245 	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
246 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
247 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
248 		return;
249 	}
250 
251 	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
252 	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
253 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
254 		parent = &msk->ooo_last_skb->rbnode;
255 		p = &parent->rb_right;
256 		goto insert;
257 	}
258 
259 	/* Find place to insert this segment. Handle overlaps on the way. */
260 	parent = NULL;
261 	while (*p) {
262 		parent = *p;
263 		skb1 = rb_to_skb(parent);
264 		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
265 			p = &parent->rb_left;
266 			continue;
267 		}
268 		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
269 			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
270 				/* All the bits are present. Drop. */
271 				mptcp_drop(sk, skb);
272 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
273 				return;
274 			}
275 			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
276 				/* partial overlap:
277 				 *     |     skb      |
278 				 *  |     skb1    |
279 				 * continue traversing
280 				 */
281 			} else {
282 				/* skb's seq == skb1's seq and skb covers skb1.
283 				 * Replace skb1 with skb.
284 				 */
285 				rb_replace_node(&skb1->rbnode, &skb->rbnode,
286 						&msk->out_of_order_queue);
287 				mptcp_drop(sk, skb1);
288 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
289 				goto merge_right;
290 			}
291 		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
292 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
293 			return;
294 		}
295 		p = &parent->rb_right;
296 	}
297 
298 insert:
299 	/* Insert segment into RB tree. */
300 	rb_link_node(&skb->rbnode, parent, p);
301 	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
302 
303 merge_right:
304 	/* Remove other segments covered by skb. */
305 	while ((skb1 = skb_rb_next(skb)) != NULL) {
306 		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
307 			break;
308 		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
309 		mptcp_drop(sk, skb1);
310 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
311 	}
312 	/* If there is no skb after us, we are the last_skb! */
313 	if (!skb1)
314 		msk->ooo_last_skb = skb;
315 
316 end:
317 	skb_condense(skb);
318 	mptcp_set_owner_r(skb, sk);
319 }
320 
321 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
322 {
323 	struct mptcp_sock *msk = mptcp_sk(sk);
324 	int amt, amount;
325 
326 	if (size < msk->rmem_fwd_alloc)
327 		return true;
328 
329 	amt = sk_mem_pages(size);
330 	amount = amt << SK_MEM_QUANTUM_SHIFT;
331 	msk->rmem_fwd_alloc += amount;
332 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) {
333 		if (ssk->sk_forward_alloc < amount) {
334 			msk->rmem_fwd_alloc -= amount;
335 			return false;
336 		}
337 
338 		ssk->sk_forward_alloc -= amount;
339 	}
340 	return true;
341 }
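
/* Worked example (illustrative, assuming 4 KiB quanta): a request for
 * size = 3000 with msk->rmem_fwd_alloc = 1000 misses the fast path, so
 * amt = sk_mem_pages(3000) = 1 and amount = 4096 bytes are added to the
 * msk forward allocation. If the global accounting refuses the charge,
 * the quantum is moved from the subflow instead: ssk->sk_forward_alloc
 * must hold at least 4096 bytes, otherwise the schedule fails and the
 * caller drops the skb.
 */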
342 
343 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
344 			     struct sk_buff *skb, unsigned int offset,
345 			     size_t copy_len)
346 {
347 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
348 	struct sock *sk = (struct sock *)msk;
349 	struct sk_buff *tail;
350 	bool has_rxtstamp;
351 
352 	__skb_unlink(skb, &ssk->sk_receive_queue);
353 
354 	skb_ext_reset(skb);
355 	skb_orphan(skb);
356 
357 	/* try to fetch required memory from subflow */
358 	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
359 		goto drop;
360 
361 	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
362 
363 	/* the skb map_seq accounts for the skb offset:
364 	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
365 	 * value
366 	 */
367 	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
368 	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
369 	MPTCP_SKB_CB(skb)->offset = offset;
370 	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
371 
372 	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
373 		/* in sequence */
374 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
375 		tail = skb_peek_tail(&sk->sk_receive_queue);
376 		if (tail && mptcp_try_coalesce(sk, tail, skb))
377 			return true;
378 
379 		mptcp_set_owner_r(skb, sk);
380 		__skb_queue_tail(&sk->sk_receive_queue, skb);
381 		return true;
382 	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
383 		mptcp_data_queue_ofo(msk, skb);
384 		return false;
385 	}
386 
387 	/* old data, keep it simple and drop the whole pkt; the sender
388 	 * will retransmit it if needed.
389 	 */
390 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
391 drop:
392 	mptcp_drop(sk, skb);
393 	return false;
394 }
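
/* Illustrative sketch of the three outcomes above: with msk->ack_seq at
 * 1000, an skb mapped at [1000, 1500) is appended in sequence (or
 * coalesced with the tail) and ack_seq moves to 1500; a mapping starting
 * after 1000 goes to the out-of-order rb-tree; one starting below 1000
 * is dropped whole as DUPDATA, even if it extends past 1000, leaving any
 * hole to be filled by a retransmission.
 */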
395 
396 static void mptcp_stop_timer(struct sock *sk)
397 {
398 	struct inet_connection_sock *icsk = inet_csk(sk);
399 
400 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
401 	mptcp_sk(sk)->timer_ival = 0;
402 }
403 
404 static void mptcp_close_wake_up(struct sock *sk)
405 {
406 	if (sock_flag(sk, SOCK_DEAD))
407 		return;
408 
409 	sk->sk_state_change(sk);
410 	if (sk->sk_shutdown == SHUTDOWN_MASK ||
411 	    sk->sk_state == TCP_CLOSE)
412 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
413 	else
414 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
415 }
416 
417 static bool mptcp_pending_data_fin_ack(struct sock *sk)
418 {
419 	struct mptcp_sock *msk = mptcp_sk(sk);
420 
421 	return !__mptcp_check_fallback(msk) &&
422 	       ((1 << sk->sk_state) &
423 		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
424 	       msk->write_seq == READ_ONCE(msk->snd_una);
425 }
426 
427 static void mptcp_check_data_fin_ack(struct sock *sk)
428 {
429 	struct mptcp_sock *msk = mptcp_sk(sk);
430 
431 	/* Look for an acknowledged DATA_FIN */
432 	if (mptcp_pending_data_fin_ack(sk)) {
433 		WRITE_ONCE(msk->snd_data_fin_enable, 0);
434 
435 		switch (sk->sk_state) {
436 		case TCP_FIN_WAIT1:
437 			inet_sk_state_store(sk, TCP_FIN_WAIT2);
438 			break;
439 		case TCP_CLOSING:
440 		case TCP_LAST_ACK:
441 			inet_sk_state_store(sk, TCP_CLOSE);
442 			break;
443 		}
444 
445 		mptcp_close_wake_up(sk);
446 	}
447 }
448 
449 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
450 {
451 	struct mptcp_sock *msk = mptcp_sk(sk);
452 
453 	if (READ_ONCE(msk->rcv_data_fin) &&
454 	    ((1 << sk->sk_state) &
455 	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
456 		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
457 
458 		if (msk->ack_seq == rcv_data_fin_seq) {
459 			if (seq)
460 				*seq = rcv_data_fin_seq;
461 
462 			return true;
463 		}
464 	}
465 
466 	return false;
467 }
468 
469 static void mptcp_set_datafin_timeout(const struct sock *sk)
470 {
471 	struct inet_connection_sock *icsk = inet_csk(sk);
472 	u32 retransmits;
473 
474 	retransmits = min_t(u32, icsk->icsk_retransmits,
475 			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
476 
477 	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
478 }
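
/* Worked example (illustrative): with the stock TCP_RTO_MIN of 200ms and
 * TCP_RTO_MAX of 120s, ilog2(600) = 9 caps the shift, so the DATA_FIN
 * retransmit interval doubles per attempt (200ms, 400ms, ...) up to
 * 200ms << 9 = 102.4s, staying below TCP_RTO_MAX like plain TCP's
 * exponential backoff.
 */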
479 
480 static void __mptcp_set_timeout(struct sock *sk, long tout)
481 {
482 	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
483 }
484 
485 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
486 {
487 	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
488 
489 	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
490 	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
491 }
492 
493 static void mptcp_set_timeout(struct sock *sk)
494 {
495 	struct mptcp_subflow_context *subflow;
496 	long tout = 0;
497 
498 	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
499 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
500 	__mptcp_set_timeout(sk, tout);
501 }
502 
503 static bool tcp_can_send_ack(const struct sock *ssk)
504 {
505 	return !((1 << inet_sk_state_load(ssk)) &
506 	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
507 }
508 
509 void mptcp_subflow_send_ack(struct sock *ssk)
510 {
511 	bool slow;
512 
513 	slow = lock_sock_fast(ssk);
514 	if (tcp_can_send_ack(ssk))
515 		tcp_send_ack(ssk);
516 	unlock_sock_fast(ssk, slow);
517 }
518 
519 static void mptcp_send_ack(struct mptcp_sock *msk)
520 {
521 	struct mptcp_subflow_context *subflow;
522 
523 	mptcp_for_each_subflow(msk, subflow)
524 		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
525 }
526 
527 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
528 {
529 	bool slow;
530 
531 	slow = lock_sock_fast(ssk);
532 	if (tcp_can_send_ack(ssk))
533 		tcp_cleanup_rbuf(ssk, 1);
534 	unlock_sock_fast(ssk, slow);
535 }
536 
537 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
538 {
539 	const struct inet_connection_sock *icsk = inet_csk(ssk);
540 	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
541 	const struct tcp_sock *tp = tcp_sk(ssk);
542 
543 	return (ack_pending & ICSK_ACK_SCHED) &&
544 		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
545 		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
546 		 (rx_empty && ack_pending &
547 			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
548 }
549 
550 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
551 {
552 	int old_space = READ_ONCE(msk->old_wspace);
553 	struct mptcp_subflow_context *subflow;
554 	struct sock *sk = (struct sock *)msk;
555 	int space = __mptcp_space(sk);
556 	bool cleanup, rx_empty;
557 
558 	cleanup = (space > 0) && (space >= (old_space << 1));
559 	rx_empty = !__mptcp_rmem(sk);
560 
561 	mptcp_for_each_subflow(msk, subflow) {
562 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
563 
564 		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
565 			mptcp_subflow_cleanup_rbuf(ssk);
566 	}
567 }
568 
569 static bool mptcp_check_data_fin(struct sock *sk)
570 {
571 	struct mptcp_sock *msk = mptcp_sk(sk);
572 	u64 rcv_data_fin_seq;
573 	bool ret = false;
574 
575 	if (__mptcp_check_fallback(msk))
576 		return ret;
577 
578 	/* Need to ack a DATA_FIN received from a peer while this side
579 	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
580 	 * msk->rcv_data_fin was set when parsing the incoming options
581 	 * at the subflow level and the msk lock was not held, so this
582 	 * is the first opportunity to act on the DATA_FIN and change
583 	 * the msk state.
584 	 *
585 	 * If we are caught up to the sequence number of the incoming
586 	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
587 	 * not caught up, do nothing and let the recv code send DATA_ACK
588 	 * when catching up.
589 	 */
590 
591 	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
592 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
593 		WRITE_ONCE(msk->rcv_data_fin, 0);
594 
595 		sk->sk_shutdown |= RCV_SHUTDOWN;
596 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
597 
598 		switch (sk->sk_state) {
599 		case TCP_ESTABLISHED:
600 			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
601 			break;
602 		case TCP_FIN_WAIT1:
603 			inet_sk_state_store(sk, TCP_CLOSING);
604 			break;
605 		case TCP_FIN_WAIT2:
606 			inet_sk_state_store(sk, TCP_CLOSE);
607 			break;
608 		default:
609 			/* Other states not expected */
610 			WARN_ON_ONCE(1);
611 			break;
612 		}
613 
614 		ret = true;
615 		mptcp_send_ack(msk);
616 		mptcp_close_wake_up(sk);
617 	}
618 	return ret;
619 }
620 
621 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
622 					   struct sock *ssk,
623 					   unsigned int *bytes)
624 {
625 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
626 	struct sock *sk = (struct sock *)msk;
627 	unsigned int moved = 0;
628 	bool more_data_avail;
629 	struct tcp_sock *tp;
630 	bool done = false;
631 	int sk_rbuf;
632 
633 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
634 
635 	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
636 		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
637 
638 		if (unlikely(ssk_rbuf > sk_rbuf)) {
639 			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
640 			sk_rbuf = ssk_rbuf;
641 		}
642 	}
643 
644 	pr_debug("msk=%p ssk=%p", msk, ssk);
645 	tp = tcp_sk(ssk);
646 	do {
647 		u32 map_remaining, offset;
648 		u32 seq = tp->copied_seq;
649 		struct sk_buff *skb;
650 		bool fin;
651 
652 		/* try to move as much data as available */
653 		map_remaining = subflow->map_data_len -
654 				mptcp_subflow_get_map_offset(subflow);
655 
656 		skb = skb_peek(&ssk->sk_receive_queue);
657 		if (!skb) {
658 			/* if no data is found, a racing workqueue/recvmsg
659 			 * already processed the new data; stop here or we
660 			 * can enter an infinite loop
661 			 */
662 			if (!moved)
663 				done = true;
664 			break;
665 		}
666 
667 		if (__mptcp_check_fallback(msk)) {
668 			/* if we are running under the workqueue, TCP could have
669 			 * collapsed skbs between dummy map creation and now;
670 			 * be sure to adjust the size accordingly
671 			 */
672 			map_remaining = skb->len;
673 			subflow->map_data_len = skb->len;
674 		}
675 
676 		offset = seq - TCP_SKB_CB(skb)->seq;
677 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
678 		if (fin) {
679 			done = true;
680 			seq++;
681 		}
682 
683 		if (offset < skb->len) {
684 			size_t len = skb->len - offset;
685 
686 			if (tp->urg_data)
687 				done = true;
688 
689 			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
690 				moved += len;
691 			seq += len;
692 
693 			if (WARN_ON_ONCE(map_remaining < len))
694 				break;
695 		} else {
696 			WARN_ON_ONCE(!fin);
697 			sk_eat_skb(ssk, skb);
698 			done = true;
699 		}
700 
701 		WRITE_ONCE(tp->copied_seq, seq);
702 		more_data_avail = mptcp_subflow_data_available(ssk);
703 
704 		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
705 			done = true;
706 			break;
707 		}
708 	} while (more_data_avail);
709 
710 	*bytes += moved;
711 	return done;
712 }
713 
714 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
715 {
716 	struct sock *sk = (struct sock *)msk;
717 	struct sk_buff *skb, *tail;
718 	bool moved = false;
719 	struct rb_node *p;
720 	u64 end_seq;
721 
722 	p = rb_first(&msk->out_of_order_queue);
723 	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
724 	while (p) {
725 		skb = rb_to_skb(p);
726 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
727 			break;
728 
729 		p = rb_next(p);
730 		rb_erase(&skb->rbnode, &msk->out_of_order_queue);
731 
732 		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
733 				      msk->ack_seq))) {
734 			mptcp_drop(sk, skb);
735 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
736 			continue;
737 		}
738 
739 		end_seq = MPTCP_SKB_CB(skb)->end_seq;
740 		tail = skb_peek_tail(&sk->sk_receive_queue);
741 		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
742 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
743 
744 			/* skip overlapping data, if any */
745 			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
746 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
747 				 delta);
748 			MPTCP_SKB_CB(skb)->offset += delta;
749 			MPTCP_SKB_CB(skb)->map_seq += delta;
750 			__skb_queue_tail(&sk->sk_receive_queue, skb);
751 		}
752 		msk->ack_seq = end_seq;
753 		moved = true;
754 	}
755 	return moved;
756 }
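
/* Worked example (illustrative) of the overlap handling above: with
 * msk->ack_seq at 1000, an ooo skb mapped at [900, 1200) that cannot be
 * coalesced gets delta = 100 skipped by advancing both offset and
 * map_seq, is queued as [1000, 1200), and ack_seq jumps to 1200; a fully
 * covered skb, e.g. [900, 1000), is dropped as DUPDATA instead.
 */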
757 
758 /* In most cases we will be able to lock the mptcp socket.  If it's already
759  * owned, we need to defer to the work queue to avoid an ABBA deadlock.
760  */
761 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
762 {
763 	struct sock *sk = (struct sock *)msk;
764 	unsigned int moved = 0;
765 
766 	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
767 	__mptcp_ofo_queue(msk);
768 	if (unlikely(ssk->sk_err)) {
769 		if (!sock_owned_by_user(sk))
770 			__mptcp_error_report(sk);
771 		else
772 			__set_bit(MPTCP_ERROR_REPORT,  &msk->cb_flags);
773 	}
774 
775 	/* If the moves have caught up with the DATA_FIN sequence number
776 	 * it's time to ack the DATA_FIN and change socket state, but
777 	 * this is not a good place to change state. Let the workqueue
778 	 * do it.
779 	 */
780 	if (mptcp_pending_data_fin(sk, NULL))
781 		mptcp_schedule_work(sk);
782 	return moved > 0;
783 }
784 
785 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
786 {
787 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
788 	struct mptcp_sock *msk = mptcp_sk(sk);
789 	int sk_rbuf, ssk_rbuf;
790 
791 	/* The peer can send data while we are shutting down this
792 	 * subflow at msk destruction time, but we must avoid enqueuing
793 	 * more data to the msk receive queue
794 	 */
795 	if (unlikely(subflow->disposable))
796 		return;
797 
798 	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
799 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
800 	if (unlikely(ssk_rbuf > sk_rbuf))
801 		sk_rbuf = ssk_rbuf;
802 
803 	/* over limit? can't append more skbs to msk; also, no need to wake up */
804 	if (__mptcp_rmem(sk) > sk_rbuf) {
805 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
806 		return;
807 	}
808 
809 	/* Wake up the reader only for in-sequence data */
810 	mptcp_data_lock(sk);
811 	if (move_skbs_to_msk(msk, ssk))
812 		sk->sk_data_ready(sk);
813 
814 	mptcp_data_unlock(sk);
815 }
816 
817 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
818 {
819 	struct sock *sk = (struct sock *)msk;
820 
821 	if (sk->sk_state != TCP_ESTABLISHED)
822 		return false;
823 
824 	/* attach to msk socket only after we are sure we will deal with it
825 	 * at close time
826 	 */
827 	if (sk->sk_socket && !ssk->sk_socket)
828 		mptcp_sock_graft(ssk, sk->sk_socket);
829 
830 	mptcp_propagate_sndbuf((struct sock *)msk, ssk);
831 	mptcp_sockopt_sync_locked(msk, ssk);
832 	return true;
833 }
834 
835 static void __mptcp_flush_join_list(struct sock *sk)
836 {
837 	struct mptcp_subflow_context *tmp, *subflow;
838 	struct mptcp_sock *msk = mptcp_sk(sk);
839 
840 	list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) {
841 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
842 		bool slow = lock_sock_fast(ssk);
843 
844 		list_move_tail(&subflow->node, &msk->conn_list);
845 		if (!__mptcp_finish_join(msk, ssk))
846 			mptcp_subflow_reset(ssk);
847 		unlock_sock_fast(ssk, slow);
848 	}
849 }
850 
851 static bool mptcp_timer_pending(struct sock *sk)
852 {
853 	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
854 }
855 
856 static void mptcp_reset_timer(struct sock *sk)
857 {
858 	struct inet_connection_sock *icsk = inet_csk(sk);
859 	unsigned long tout;
860 
861 	/* prevent rescheduling on close */
862 	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
863 		return;
864 
865 	tout = mptcp_sk(sk)->timer_ival;
866 	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
867 }
868 
869 bool mptcp_schedule_work(struct sock *sk)
870 {
871 	if (inet_sk_state_load(sk) != TCP_CLOSE &&
872 	    schedule_work(&mptcp_sk(sk)->work)) {
873 		/* each subflow already holds a reference to the sk, and the
874 		 * workqueue is invoked by a subflow, so sk can't go away here.
875 		 */
876 		sock_hold(sk);
877 		return true;
878 	}
879 	return false;
880 }
881 
882 void mptcp_subflow_eof(struct sock *sk)
883 {
884 	if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
885 		mptcp_schedule_work(sk);
886 }
887 
888 static void mptcp_check_for_eof(struct mptcp_sock *msk)
889 {
890 	struct mptcp_subflow_context *subflow;
891 	struct sock *sk = (struct sock *)msk;
892 	int receivers = 0;
893 
894 	mptcp_for_each_subflow(msk, subflow)
895 		receivers += !subflow->rx_eof;
896 	if (receivers)
897 		return;
898 
899 	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
900 		/* hopefully temporary hack: propagate shutdown status
901 		 * to msk, when all subflows agree on it
902 		 */
903 		sk->sk_shutdown |= RCV_SHUTDOWN;
904 
905 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
906 		sk->sk_data_ready(sk);
907 	}
908 
909 	switch (sk->sk_state) {
910 	case TCP_ESTABLISHED:
911 		inet_sk_state_store(sk, TCP_CLOSE_WAIT);
912 		break;
913 	case TCP_FIN_WAIT1:
914 		inet_sk_state_store(sk, TCP_CLOSING);
915 		break;
916 	case TCP_FIN_WAIT2:
917 		inet_sk_state_store(sk, TCP_CLOSE);
918 		break;
919 	default:
920 		return;
921 	}
922 	mptcp_close_wake_up(sk);
923 }
924 
925 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
926 {
927 	struct mptcp_subflow_context *subflow;
928 	struct sock *sk = (struct sock *)msk;
929 
930 	sock_owned_by_me(sk);
931 
932 	mptcp_for_each_subflow(msk, subflow) {
933 		if (READ_ONCE(subflow->data_avail))
934 			return mptcp_subflow_tcp_sock(subflow);
935 	}
936 
937 	return NULL;
938 }
939 
940 static bool mptcp_skb_can_collapse_to(u64 write_seq,
941 				      const struct sk_buff *skb,
942 				      const struct mptcp_ext *mpext)
943 {
944 	if (!tcp_skb_can_collapse_to(skb))
945 		return false;
946 
947 	/* can collapse only if MPTCP level sequence is in order and this
948 	 * mapping has not been xmitted yet
949 	 */
950 	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
951 	       !mpext->frozen;
952 }
953 
954 /* we can append data to the given data frag if:
955  * - there is space available in the backing page_frag
956  * - the data frag tail matches the current page_frag free offset
957  * - the data frag end sequence number matches the current write seq
958  */
959 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
960 				       const struct page_frag *pfrag,
961 				       const struct mptcp_data_frag *df)
962 {
963 	return df && pfrag->page == df->page &&
964 		pfrag->size - pfrag->offset > 0 &&
965 		pfrag->offset == (df->offset + df->data_len) &&
966 		df->data_seq + df->data_len == msk->write_seq;
967 }
968 
969 static void __mptcp_mem_reclaim_partial(struct sock *sk)
970 {
971 	int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
972 
973 	lockdep_assert_held_once(&sk->sk_lock.slock);
974 
975 	if (reclaimable > SK_MEM_QUANTUM)
976 		__mptcp_rmem_reclaim(sk, reclaimable - 1);
977 
978 	sk_mem_reclaim_partial(sk);
979 }
980 
981 static void mptcp_mem_reclaim_partial(struct sock *sk)
982 {
983 	mptcp_data_lock(sk);
984 	__mptcp_mem_reclaim_partial(sk);
985 	mptcp_data_unlock(sk);
986 }
987 
988 static void dfrag_uncharge(struct sock *sk, int len)
989 {
990 	sk_mem_uncharge(sk, len);
991 	sk_wmem_queued_add(sk, -len);
992 }
993 
994 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
995 {
996 	int len = dfrag->data_len + dfrag->overhead;
997 
998 	list_del(&dfrag->list);
999 	dfrag_uncharge(sk, len);
1000 	put_page(dfrag->page);
1001 }
1002 
1003 static void __mptcp_clean_una(struct sock *sk)
1004 {
1005 	struct mptcp_sock *msk = mptcp_sk(sk);
1006 	struct mptcp_data_frag *dtmp, *dfrag;
1007 	bool cleaned = false;
1008 	u64 snd_una;
1009 
1010 	/* on fallback we just need to ignore snd_una, as this is really
1011 	 * plain TCP
1012 	 */
1013 	if (__mptcp_check_fallback(msk))
1014 		msk->snd_una = READ_ONCE(msk->snd_nxt);
1015 
1016 	snd_una = msk->snd_una;
1017 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1018 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1019 			break;
1020 
1021 		if (unlikely(dfrag == msk->first_pending)) {
1022 			/* in recovery mode we can see acks after the current snd head */
1023 			if (WARN_ON_ONCE(!msk->recovery))
1024 				break;
1025 
1026 			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1027 		}
1028 
1029 		dfrag_clear(sk, dfrag);
1030 		cleaned = true;
1031 	}
1032 
1033 	dfrag = mptcp_rtx_head(sk);
1034 	if (dfrag && after64(snd_una, dfrag->data_seq)) {
1035 		u64 delta = snd_una - dfrag->data_seq;
1036 
1037 		/* prevent wrap around in recovery mode */
1038 		if (unlikely(delta > dfrag->already_sent)) {
1039 			if (WARN_ON_ONCE(!msk->recovery))
1040 				goto out;
1041 			if (WARN_ON_ONCE(delta > dfrag->data_len))
1042 				goto out;
1043 			dfrag->already_sent += delta - dfrag->already_sent;
1044 		}
1045 
1046 		dfrag->data_seq += delta;
1047 		dfrag->offset += delta;
1048 		dfrag->data_len -= delta;
1049 		dfrag->already_sent -= delta;
1050 
1051 		dfrag_uncharge(sk, delta);
1052 		cleaned = true;
1053 	}
1054 
1055 	/* all retransmitted data acked, recovery completed */
1056 	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1057 		msk->recovery = false;
1058 
1059 out:
1060 	if (cleaned && tcp_under_memory_pressure(sk))
1061 		__mptcp_mem_reclaim_partial(sk);
1062 
1063 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
1064 	    snd_una == READ_ONCE(msk->write_seq)) {
1065 		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1066 			mptcp_stop_timer(sk);
1067 	} else {
1068 		mptcp_reset_timer(sk);
1069 	}
1070 }
1071 
1072 static void __mptcp_clean_una_wakeup(struct sock *sk)
1073 {
1074 	lockdep_assert_held_once(&sk->sk_lock.slock);
1075 
1076 	__mptcp_clean_una(sk);
1077 	mptcp_write_space(sk);
1078 }
1079 
1080 static void mptcp_clean_una_wakeup(struct sock *sk)
1081 {
1082 	mptcp_data_lock(sk);
1083 	__mptcp_clean_una_wakeup(sk);
1084 	mptcp_data_unlock(sk);
1085 }
1086 
1087 static void mptcp_enter_memory_pressure(struct sock *sk)
1088 {
1089 	struct mptcp_subflow_context *subflow;
1090 	struct mptcp_sock *msk = mptcp_sk(sk);
1091 	bool first = true;
1092 
1093 	sk_stream_moderate_sndbuf(sk);
1094 	mptcp_for_each_subflow(msk, subflow) {
1095 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1096 
1097 		if (first)
1098 			tcp_enter_memory_pressure(ssk);
1099 		sk_stream_moderate_sndbuf(ssk);
1100 		first = false;
1101 	}
1102 }
1103 
1104 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1105  * data
1106  */
1107 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1108 {
1109 	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
1110 					pfrag, sk->sk_allocation)))
1111 		return true;
1112 
1113 	mptcp_enter_memory_pressure(sk);
1114 	return false;
1115 }
1116 
1117 static struct mptcp_data_frag *
1118 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1119 		      int orig_offset)
1120 {
1121 	int offset = ALIGN(orig_offset, sizeof(long));
1122 	struct mptcp_data_frag *dfrag;
1123 
1124 	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
1125 	dfrag->data_len = 0;
1126 	dfrag->data_seq = msk->write_seq;
1127 	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
1128 	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1129 	dfrag->already_sent = 0;
1130 	dfrag->page = pfrag->page;
1131 
1132 	return dfrag;
1133 }
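
/* Illustrative layout, not in the original file: the dfrag descriptor is
 * carved from the same page that will hold the data. On a 64-bit build
 * with orig_offset = 100, offset = ALIGN(100, 8) = 104, the descriptor
 * occupies [104, 104 + sizeof(struct mptcp_data_frag)) and the payload
 * is copied right after it, so overhead = 4 + sizeof(struct
 * mptcp_data_frag) bytes of the page frag are consumed beyond the data.
 */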
1134 
1135 struct mptcp_sendmsg_info {
1136 	int mss_now;
1137 	int size_goal;
1138 	u16 limit;
1139 	u16 sent;
1140 	unsigned int flags;
1141 	bool data_lock_held;
1142 };
1143 
1144 static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
1145 				    int avail_size)
1146 {
1147 	u64 window_end = mptcp_wnd_end(msk);
1148 
1149 	if (__mptcp_check_fallback(msk))
1150 		return avail_size;
1151 
1152 	if (!before64(data_seq + avail_size, window_end)) {
1153 		u64 allowed_size = window_end - data_seq;
1154 
1155 		return min_t(unsigned int, allowed_size, avail_size);
1156 	}
1157 
1158 	return avail_size;
1159 }
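
/* Worked example (illustrative): with mptcp_wnd_end() at 5000, sending
 * from data_seq = 4000 with avail_size = 2000 would cross the peer's
 * advertised window, so the send is clamped to 5000 - 4000 = 1000 bytes;
 * a fallback socket skips the check, since the window is then enforced
 * by plain TCP.
 */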
1160 
1161 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
1162 {
1163 	struct skb_ext *mpext = __skb_ext_alloc(gfp);
1164 
1165 	if (!mpext)
1166 		return false;
1167 	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
1168 	return true;
1169 }
1170 
1171 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1172 {
1173 	struct sk_buff *skb;
1174 
1175 	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1176 	if (likely(skb)) {
1177 		if (likely(__mptcp_add_ext(skb, gfp))) {
1178 			skb_reserve(skb, MAX_TCP_HEADER);
1179 			skb->ip_summed = CHECKSUM_PARTIAL;
1180 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
1181 			return skb;
1182 		}
1183 		__kfree_skb(skb);
1184 	} else {
1185 		mptcp_enter_memory_pressure(sk);
1186 	}
1187 	return NULL;
1188 }
1189 
1190 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1191 {
1192 	struct sk_buff *skb;
1193 
1194 	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1195 	if (!skb)
1196 		return NULL;
1197 
1198 	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1199 		tcp_skb_entail(ssk, skb);
1200 		return skb;
1201 	}
1202 	tcp_skb_tsorted_anchor_cleanup(skb);
1203 	kfree_skb(skb);
1204 	return NULL;
1205 }
1206 
1207 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
1208 {
1209 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
1210 
1211 	if (unlikely(tcp_under_memory_pressure(sk))) {
1212 		if (data_lock_held)
1213 			__mptcp_mem_reclaim_partial(sk);
1214 		else
1215 			mptcp_mem_reclaim_partial(sk);
1216 	}
1217 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
1218 }
1219 
1220 /* note: this always recomputes the csum on the whole skb, even
1221  * if we just appended a single frag. More status info needed
1222  */
1223 static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
1224 {
1225 	struct mptcp_ext *mpext = mptcp_get_ext(skb);
1226 	__wsum csum = ~csum_unfold(mpext->csum);
1227 	int offset = skb->len - added;
1228 
1229 	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
1230 }
1231 
1232 static void mptcp_update_infinite_map(struct mptcp_sock *msk,
1233 				      struct sock *ssk,
1234 				      struct mptcp_ext *mpext)
1235 {
1236 	if (!mpext)
1237 		return;
1238 
1239 	mpext->infinite_map = 1;
1240 	mpext->data_len = 0;
1241 
1242 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
1243 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
1244 	pr_fallback(msk);
1245 	__mptcp_do_fallback(msk);
1246 }
1247 
1248 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1249 			      struct mptcp_data_frag *dfrag,
1250 			      struct mptcp_sendmsg_info *info)
1251 {
1252 	u64 data_seq = dfrag->data_seq + info->sent;
1253 	int offset = dfrag->offset + info->sent;
1254 	struct mptcp_sock *msk = mptcp_sk(sk);
1255 	bool zero_window_probe = false;
1256 	struct mptcp_ext *mpext = NULL;
1257 	bool can_coalesce = false;
1258 	bool reuse_skb = true;
1259 	struct sk_buff *skb;
1260 	size_t copy;
1261 	int i;
1262 
1263 	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1264 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1265 
1266 	if (WARN_ON_ONCE(info->sent > info->limit ||
1267 			 info->limit > dfrag->data_len))
1268 		return 0;
1269 
1270 	/* compute send limit */
1271 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1272 	copy = info->size_goal;
1273 
1274 	skb = tcp_write_queue_tail(ssk);
1275 	if (skb && copy > skb->len) {
1276 		/* Limit the write to the size available in the
1277 		 * current skb, if any, so that we create at most one new skb.
1278 		 * Explicitly tells TCP internals to avoid collapsing on later
1279 		 * queue management operations, to avoid breaking the ext <->
1280 		 * SSN association set here
1281 		 */
1282 		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
1283 		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
1284 			TCP_SKB_CB(skb)->eor = 1;
1285 			goto alloc_skb;
1286 		}
1287 
1288 		i = skb_shinfo(skb)->nr_frags;
1289 		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
1290 		if (!can_coalesce && i >= sysctl_max_skb_frags) {
1291 			tcp_mark_push(tcp_sk(ssk), skb);
1292 			goto alloc_skb;
1293 		}
1294 
1295 		copy -= skb->len;
1296 	} else {
1297 alloc_skb:
1298 		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
1299 		if (!skb)
1300 			return -ENOMEM;
1301 
1302 		i = skb_shinfo(skb)->nr_frags;
1303 		reuse_skb = false;
1304 		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
1305 	}
1306 
1307 	/* Zero window and all data acked? Probe. */
1308 	copy = mptcp_check_allowed_size(msk, data_seq, copy);
1309 	if (copy == 0) {
1310 		u64 snd_una = READ_ONCE(msk->snd_una);
1311 
1312 		if (snd_una != msk->snd_nxt) {
1313 			tcp_remove_empty_skb(ssk);
1314 			return 0;
1315 		}
1316 
1317 		zero_window_probe = true;
1318 		data_seq = snd_una - 1;
1319 		copy = 1;
1320 
1321 		/* all mptcp-level data is acked, no skbs should be present in the
1322 		 * ssk write queue
1323 		 */
1324 		WARN_ON_ONCE(reuse_skb);
1325 	}
1326 
1327 	copy = min_t(size_t, copy, info->limit - info->sent);
1328 	if (!sk_wmem_schedule(ssk, copy)) {
1329 		tcp_remove_empty_skb(ssk);
1330 		return -ENOMEM;
1331 	}
1332 
1333 	if (can_coalesce) {
1334 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1335 	} else {
1336 		get_page(dfrag->page);
1337 		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
1338 	}
1339 
1340 	skb->len += copy;
1341 	skb->data_len += copy;
1342 	skb->truesize += copy;
1343 	sk_wmem_queued_add(ssk, copy);
1344 	sk_mem_charge(ssk, copy);
1345 	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
1346 	TCP_SKB_CB(skb)->end_seq += copy;
1347 	tcp_skb_pcount_set(skb, 0);
1348 
1349 	/* on skb reuse we just need to update the DSS len */
1350 	if (reuse_skb) {
1351 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1352 		mpext->data_len += copy;
1353 		WARN_ON_ONCE(zero_window_probe);
1354 		goto out;
1355 	}
1356 
1357 	memset(mpext, 0, sizeof(*mpext));
1358 	mpext->data_seq = data_seq;
1359 	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1360 	mpext->data_len = copy;
1361 	mpext->use_map = 1;
1362 	mpext->dsn64 = 1;
1363 
1364 	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
1365 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1366 		 mpext->dsn64);
1367 
1368 	if (zero_window_probe) {
1369 		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1370 		mpext->frozen = 1;
1371 		if (READ_ONCE(msk->csum_enabled))
1372 			mptcp_update_data_checksum(skb, copy);
1373 		tcp_push_pending_frames(ssk);
1374 		return 0;
1375 	}
1376 out:
1377 	if (READ_ONCE(msk->csum_enabled))
1378 		mptcp_update_data_checksum(skb, copy);
1379 	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
1380 		mptcp_update_infinite_map(msk, ssk, mpext);
1381 	trace_mptcp_sendmsg_frag(mpext);
1382 	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1383 	return copy;
1384 }
1385 
1386 #define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
1387 					 sizeof(struct tcphdr) - \
1388 					 MAX_TCP_OPTION_SPACE - \
1389 					 sizeof(struct ipv6hdr) - \
1390 					 sizeof(struct frag_hdr))
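
/* Illustrative arithmetic: the burst is sized so a full 64 KiB GSO
 * packet still fits in the worst case, i.e. 65536 minus the 20-byte TCP
 * header, 40 bytes of TCP option space, the 40-byte IPv6 header and the
 * 8-byte fragment header, leaving 65428 bytes of payload (IPv4 simply
 * wastes a little of this allowance).
 */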
1391 
1392 struct subflow_send_info {
1393 	struct sock *ssk;
1394 	u64 linger_time;
1395 };
1396 
1397 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1398 {
1399 	if (!subflow->stale)
1400 		return;
1401 
1402 	subflow->stale = 0;
1403 	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1404 }
1405 
1406 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1407 {
1408 	if (unlikely(subflow->stale)) {
1409 		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1410 
1411 		if (subflow->stale_rcv_tstamp == rcv_tstamp)
1412 			return false;
1413 
1414 		mptcp_subflow_set_active(subflow);
1415 	}
1416 	return __mptcp_subflow_active(subflow);
1417 }
1418 
1419 #define SSK_MODE_ACTIVE	0
1420 #define SSK_MODE_BACKUP	1
1421 #define SSK_MODE_MAX	2
1422 
1423 /* implement the mptcp packet scheduler;
1424  * returns the subflow that will transmit the next DSS;
1425  * additionally updates the rtx timeout
1426  */
1427 static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1428 {
1429 	struct subflow_send_info send_info[SSK_MODE_MAX];
1430 	struct mptcp_subflow_context *subflow;
1431 	struct sock *sk = (struct sock *)msk;
1432 	u32 pace, burst, wmem;
1433 	int i, nr_active = 0;
1434 	struct sock *ssk;
1435 	u64 linger_time;
1436 	long tout = 0;
1437 
1438 	sock_owned_by_me(sk);
1439 
1440 	if (__mptcp_check_fallback(msk)) {
1441 		if (!msk->first)
1442 			return NULL;
1443 		return sk_stream_memory_free(msk->first) ? msk->first : NULL;
1444 	}
1445 
1446 	/* re-use last subflow, if the burst allows that */
1447 	if (msk->last_snd && msk->snd_burst > 0 &&
1448 	    sk_stream_memory_free(msk->last_snd) &&
1449 	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
1450 		mptcp_set_timeout(sk);
1451 		return msk->last_snd;
1452 	}
1453 
1454 	/* pick the subflow with the lower wmem/wspace ratio */
1455 	for (i = 0; i < SSK_MODE_MAX; ++i) {
1456 		send_info[i].ssk = NULL;
1457 		send_info[i].linger_time = -1;
1458 	}
1459 
1460 	mptcp_for_each_subflow(msk, subflow) {
1461 		trace_mptcp_subflow_get_send(subflow);
1462 		ssk = mptcp_subflow_tcp_sock(subflow);
1463 		if (!mptcp_subflow_active(subflow))
1464 			continue;
1465 
1466 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
1467 		nr_active += !subflow->backup;
1468 		pace = subflow->avg_pacing_rate;
1469 		if (unlikely(!pace)) {
1470 			/* init pacing rate from socket */
1471 			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1472 			pace = subflow->avg_pacing_rate;
1473 			if (!pace)
1474 				continue;
1475 		}
1476 
1477 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
1478 		if (linger_time < send_info[subflow->backup].linger_time) {
1479 			send_info[subflow->backup].ssk = ssk;
1480 			send_info[subflow->backup].linger_time = linger_time;
1481 		}
1482 	}
1483 	__mptcp_set_timeout(sk, tout);
1484 
1485 	/* pick the best backup if no other subflow is active */
1486 	if (!nr_active)
1487 		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
1488 
1489 	/* According to the BLEST algorithm, to avoid HoL blocking for the
1490 	 * faster flow, we need to:
1491 	 * - estimate the faster flow linger time
1492 	 * - use the above to estimate the amount of bytes transferred
1493 	 *   by the faster flow
1494 	 * - check that the amount of queued data is greater than the above,
1495 	 *   otherwise do not use the picked, slower, subflow
1496 	 * We select the subflow with the shorter estimated time to flush
1497 	 * the queued mem, which basically ensures the above. We just need
1498 	 * to check that the subflow has a non-empty cwin.
1499 	 */
1500 	ssk = send_info[SSK_MODE_ACTIVE].ssk;
1501 	if (!ssk || !sk_stream_memory_free(ssk) || !tcp_sk(ssk)->snd_wnd)
1502 		return NULL;
1503 
1504 	burst = min_t(int, MPTCP_SEND_BURST_SIZE, tcp_sk(ssk)->snd_wnd);
1505 	wmem = READ_ONCE(ssk->sk_wmem_queued);
1506 	subflow = mptcp_subflow_ctx(ssk);
1507 	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
1508 					   READ_ONCE(ssk->sk_pacing_rate) * burst,
1509 					   burst + wmem);
1510 	msk->last_snd = ssk;
1511 	msk->snd_burst = burst;
1512 	return ssk;
1513 }
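
/* Worked example (illustrative) of the scheduling above: linger_time is
 * the estimated time to flush the queued data, in fixed point as
 * (wmem << 32) / pacing_rate. With subflow A holding 64 KiB queued at
 * 8 MB/s (~8ms to drain) and subflow B holding 16 KiB at 1 MB/s (~16ms),
 * A wins despite the larger backlog. After the pick, avg_pacing_rate is
 * updated as a weighted average of the old estimate (weight wmem) and
 * the current ssk pacing rate (weight burst).
 */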
1514 
1515 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
1516 {
1517 	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1518 	release_sock(ssk);
1519 }
1520 
1521 static void mptcp_update_post_push(struct mptcp_sock *msk,
1522 				   struct mptcp_data_frag *dfrag,
1523 				   u32 sent)
1524 {
1525 	u64 snd_nxt_new = dfrag->data_seq;
1526 
1527 	dfrag->already_sent += sent;
1528 
1529 	msk->snd_burst -= sent;
1530 
1531 	snd_nxt_new += dfrag->already_sent;
1532 
1533 	/* snd_nxt_new can be smaller than snd_nxt in case mptcp
1534 	 * is recovering after a failover. In that event, this re-sends
1535 	 * old segments.
1536 	 *
1537 	 * Thus compute snd_nxt_new candidate based on
1538 	 * the dfrag->data_seq that was sent and the data
1539 	 * that has been handed to the subflow for transmission
1540 	 * and skip the update in case it was an old dfrag.
1541 	 */
1542 	if (likely(after64(snd_nxt_new, msk->snd_nxt)))
1543 		msk->snd_nxt = snd_nxt_new;
1544 }
1545 
1546 void mptcp_check_and_set_pending(struct sock *sk)
1547 {
1548 	if (mptcp_send_head(sk))
1549 		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
1550 }
1551 
1552 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
1553 {
1554 	struct sock *prev_ssk = NULL, *ssk = NULL;
1555 	struct mptcp_sock *msk = mptcp_sk(sk);
1556 	struct mptcp_sendmsg_info info = {
1557 				.flags = flags,
1558 	};
1559 	struct mptcp_data_frag *dfrag;
1560 	int len, copied = 0;
1561 
1562 	while ((dfrag = mptcp_send_head(sk))) {
1563 		info.sent = dfrag->already_sent;
1564 		info.limit = dfrag->data_len;
1565 		len = dfrag->data_len - dfrag->already_sent;
1566 		while (len > 0) {
1567 			int ret = 0;
1568 
1569 			prev_ssk = ssk;
1570 			ssk = mptcp_subflow_get_send(msk);
1571 
1572 			/* First check. If the ssk has changed since
1573 			 * the last round, release prev_ssk
1574 			 */
1575 			if (ssk != prev_ssk && prev_ssk)
1576 				mptcp_push_release(prev_ssk, &info);
1577 			if (!ssk)
1578 				goto out;
1579 
1580 			/* Need to lock the new subflow only if different
1581 			 * from the previous one, otherwise we are still
1582 			 * holding the relevant lock
1583 			 */
1584 			if (ssk != prev_ssk)
1585 				lock_sock(ssk);
1586 
1587 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1588 			if (ret <= 0) {
1589 				mptcp_push_release(ssk, &info);
1590 				goto out;
1591 			}
1592 
1593 			info.sent += ret;
1594 			copied += ret;
1595 			len -= ret;
1596 
1597 			mptcp_update_post_push(msk, dfrag, ret);
1598 		}
1599 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1600 	}
1601 
1602 	/* at this point we still hold the socket lock for the last subflow we used */
1603 	if (ssk)
1604 		mptcp_push_release(ssk, &info);
1605 
1606 out:
1607 	/* ensure the rtx timer is running */
1608 	if (!mptcp_timer_pending(sk))
1609 		mptcp_reset_timer(sk);
1610 	if (copied)
1611 		__mptcp_check_send_data_fin(sk);
1612 }
1613 
1614 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
1615 {
1616 	struct mptcp_sock *msk = mptcp_sk(sk);
1617 	struct mptcp_sendmsg_info info = {
1618 		.data_lock_held = true,
1619 	};
1620 	struct mptcp_data_frag *dfrag;
1621 	struct sock *xmit_ssk;
1622 	int len, copied = 0;
1623 	bool first = true;
1624 
1625 	info.flags = 0;
1626 	while ((dfrag = mptcp_send_head(sk))) {
1627 		info.sent = dfrag->already_sent;
1628 		info.limit = dfrag->data_len;
1629 		len = dfrag->data_len - dfrag->already_sent;
1630 		while (len > 0) {
1631 			int ret = 0;
1632 
1633 			/* the caller already invoked the packet scheduler,
1634 			 * check for a different subflow usage only after
1635 			 * spooling the first chunk of data
1636 			 */
1637 			xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
1638 			if (!xmit_ssk)
1639 				goto out;
1640 			if (xmit_ssk != ssk) {
1641 				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
1642 						       MPTCP_DELEGATE_SEND);
1643 				goto out;
1644 			}
1645 
1646 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1647 			if (ret <= 0)
1648 				goto out;
1649 
1650 			info.sent += ret;
1651 			copied += ret;
1652 			len -= ret;
1653 			first = false;
1654 
1655 			mptcp_update_post_push(msk, dfrag, ret);
1656 		}
1657 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1658 	}
1659 
1660 out:
1661 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
1662 	 * not going to flush it via release_sock()
1663 	 */
1664 	if (copied) {
1665 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1666 			 info.size_goal);
1667 		if (!mptcp_timer_pending(sk))
1668 			mptcp_reset_timer(sk);
1669 
1670 		if (msk->snd_data_fin_enable &&
1671 		    msk->snd_nxt + 1 == msk->write_seq)
1672 			mptcp_schedule_work(sk);
1673 	}
1674 }
1675 
1676 static void mptcp_set_nospace(struct sock *sk)
1677 {
1678 	/* enable autotune */
1679 	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1680 
1681 	/* will be cleared on avail space */
1682 	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
1683 }
1684 
1685 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1686 {
1687 	struct mptcp_sock *msk = mptcp_sk(sk);
1688 	struct page_frag *pfrag;
1689 	size_t copied = 0;
1690 	int ret = 0;
1691 	long timeo;
1692 
1693 	/* we don't support FASTOPEN yet */
1694 	if (msg->msg_flags & MSG_FASTOPEN)
1695 		return -EOPNOTSUPP;
1696 
1697 	/* silently ignore everything else */
1698 	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL;
1699 
1700 	lock_sock(sk);
1701 
1702 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1703 
1704 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1705 		ret = sk_stream_wait_connect(sk, &timeo);
1706 		if (ret)
1707 			goto out;
1708 	}
1709 
1710 	pfrag = sk_page_frag(sk);
1711 
1712 	while (msg_data_left(msg)) {
1713 		int total_ts, frag_truesize = 0;
1714 		struct mptcp_data_frag *dfrag;
1715 		bool dfrag_collapsed;
1716 		size_t psize, offset;
1717 
1718 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
1719 			ret = -EPIPE;
1720 			goto out;
1721 		}
1722 
1723 		/* reuse tail pfrag, if possible, or carve a new one from the
1724 		 * page allocator
1725 		 */
1726 		dfrag = mptcp_pending_tail(sk);
1727 		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1728 		if (!dfrag_collapsed) {
1729 			if (!sk_stream_memory_free(sk))
1730 				goto wait_for_memory;
1731 
1732 			if (!mptcp_page_frag_refill(sk, pfrag))
1733 				goto wait_for_memory;
1734 
1735 			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1736 			frag_truesize = dfrag->overhead;
1737 		}
1738 
1739 		/* we do not bound vs wspace, to allow a single packet.
1740 		 * memory accounting will prevent excessive memory usage
1741 		 * anyway
1742 		 */
1743 		offset = dfrag->offset + dfrag->data_len;
1744 		psize = pfrag->size - offset;
1745 		psize = min_t(size_t, psize, msg_data_left(msg));
1746 		total_ts = psize + frag_truesize;
1747 
1748 		if (!sk_wmem_schedule(sk, total_ts))
1749 			goto wait_for_memory;
1750 
1751 		if (copy_page_from_iter(dfrag->page, offset, psize,
1752 					&msg->msg_iter) != psize) {
1753 			ret = -EFAULT;
1754 			goto out;
1755 		}
1756 
1757 		/* data successfully copied into the write queue */
1758 		sk->sk_forward_alloc -= total_ts;
1759 		copied += psize;
1760 		dfrag->data_len += psize;
1761 		frag_truesize += psize;
1762 		pfrag->offset += frag_truesize;
1763 		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1764 
1765 		/* charge data on the mptcp pending queue to the msk socket.
1766 		 * Note: we charge such data both to sk and ssk
1767 		 */
1768 		sk_wmem_queued_add(sk, frag_truesize);
1769 		if (!dfrag_collapsed) {
1770 			get_page(dfrag->page);
1771 			list_add_tail(&dfrag->list, &msk->rtx_queue);
1772 			if (!msk->first_pending)
1773 				WRITE_ONCE(msk->first_pending, dfrag);
1774 		}
1775 		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
1776 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1777 			 !dfrag_collapsed);
1778 
1779 		continue;
1780 
1781 wait_for_memory:
1782 		mptcp_set_nospace(sk);
1783 		__mptcp_push_pending(sk, msg->msg_flags);
1784 		ret = sk_stream_wait_memory(sk, &timeo);
1785 		if (ret)
1786 			goto out;
1787 	}
1788 
1789 	if (copied)
1790 		__mptcp_push_pending(sk, msg->msg_flags);
1791 
1792 out:
1793 	release_sock(sk);
1794 	return copied ? : ret;
1795 }
1796 
1797 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1798 				struct msghdr *msg,
1799 				size_t len, int flags,
1800 				struct scm_timestamping_internal *tss,
1801 				int *cmsg_flags)
1802 {
1803 	struct sk_buff *skb, *tmp;
1804 	int copied = 0;
1805 
1806 	skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
1807 		u32 offset = MPTCP_SKB_CB(skb)->offset;
1808 		u32 data_len = skb->len - offset;
1809 		u32 count = min_t(size_t, len - copied, data_len);
1810 		int err;
1811 
1812 		if (!(flags & MSG_TRUNC)) {
1813 			err = skb_copy_datagram_msg(skb, offset, msg, count);
1814 			if (unlikely(err < 0)) {
1815 				if (!copied)
1816 					return err;
1817 				break;
1818 			}
1819 		}
1820 
1821 		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
1822 			tcp_update_recv_tstamps(skb, tss);
1823 			*cmsg_flags |= MPTCP_CMSG_TS;
1824 		}
1825 
1826 		copied += count;
1827 
1828 		if (count < data_len) {
1829 			if (!(flags & MSG_PEEK)) {
1830 				MPTCP_SKB_CB(skb)->offset += count;
1831 				MPTCP_SKB_CB(skb)->map_seq += count;
1832 			}
1833 			break;
1834 		}
1835 
1836 		if (!(flags & MSG_PEEK)) {
1837 			/* we will bulk release the skb memory later */
1838 			skb->destructor = NULL;
1839 			WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
1840 			__skb_unlink(skb, &msk->receive_queue);
1841 			__kfree_skb(skb);
1842 		}
1843 
1844 		if (copied >= len)
1845 			break;
1846 	}
1847 
1848 	return copied;
1849 }
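
/* Illustrative sketch of the partial-read bookkeeping above: copying 100
 * bytes out of a 300-byte skb without MSG_PEEK advances both
 * MPTCP_SKB_CB(skb)->offset and ->map_seq by 100 and leaves the skb
 * queued; only a fully consumed skb is unlinked, with its truesize
 * deferred to msk->rmem_released for bulk uncharging later.
 */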
1850 
1851 /* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
1852  *
1853  * Only difference: use the highest rtt estimate of the subflows in use.
1854  */
1855 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1856 {
1857 	struct mptcp_subflow_context *subflow;
1858 	struct sock *sk = (struct sock *)msk;
1859 	u32 time, advmss = 1;
1860 	u64 rtt_us, mstamp;
1861 
1862 	sock_owned_by_me(sk);
1863 
1864 	if (copied <= 0)
1865 		return;
1866 
1867 	msk->rcvq_space.copied += copied;
1868 
1869 	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
1870 	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1871 
1872 	rtt_us = msk->rcvq_space.rtt_us;
1873 	if (rtt_us && time < (rtt_us >> 3))
1874 		return;
1875 
1876 	rtt_us = 0;
1877 	mptcp_for_each_subflow(msk, subflow) {
1878 		const struct tcp_sock *tp;
1879 		u64 sf_rtt_us;
1880 		u32 sf_advmss;
1881 
1882 		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1883 
1884 		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
1885 		sf_advmss = READ_ONCE(tp->advmss);
1886 
1887 		rtt_us = max(sf_rtt_us, rtt_us);
1888 		advmss = max(sf_advmss, advmss);
1889 	}
1890 
1891 	msk->rcvq_space.rtt_us = rtt_us;
1892 	if (time < (rtt_us >> 3) || rtt_us == 0)
1893 		return;
1894 
1895 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1896 		goto new_measure;
1897 
1898 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
1899 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
1900 		int rcvmem, rcvbuf;
1901 		u64 rcvwin, grow;
1902 
1903 		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1904 
1905 		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1906 
1907 		do_div(grow, msk->rcvq_space.space);
1908 		rcvwin += (grow << 1);
1909 
1910 		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
1911 		while (tcp_win_from_space(sk, rcvmem) < advmss)
1912 			rcvmem += 128;
1913 
1914 		do_div(rcvwin, advmss);
1915 		rcvbuf = min_t(u64, rcvwin * rcvmem,
1916 			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
1917 
1918 		if (rcvbuf > sk->sk_rcvbuf) {
1919 			u32 window_clamp;
1920 
1921 			window_clamp = tcp_win_from_space(sk, rcvbuf);
1922 			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
1923 
1924 			/* Make subflows follow along.  If we do not do this, we
1925 			 * get drops at subflow level if skbs can't be moved to
1926 			 * the mptcp rx queue fast enough (announced rcv_win can
1927 			 * exceed ssk->sk_rcvbuf).
1928 			 */
1929 			mptcp_for_each_subflow(msk, subflow) {
1930 				struct sock *ssk;
1931 				bool slow;
1932 
1933 				ssk = mptcp_subflow_tcp_sock(subflow);
1934 				slow = lock_sock_fast(ssk);
1935 				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1936 				tcp_sk(ssk)->window_clamp = window_clamp;
1937 				tcp_cleanup_rbuf(ssk, 1);
1938 				unlock_sock_fast(ssk, slow);
1939 			}
1940 		}
1941 	}
1942 
1943 	msk->rcvq_space.space = msk->rcvq_space.copied;
1944 new_measure:
1945 	msk->rcvq_space.copied = 0;
1946 	msk->rcvq_space.time = mstamp;
1947 }
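
/* Illustrative walk-through of the rcvbuf growth above, with made-up
 * numbers (not from any real trace): if copied = 256KB over the last
 * rtt, space = 128KB and advmss = 1460, then
 *   rcvwin = 2 * 256KB + 16 * 1460             ~= 535KB
 *   grow   = rcvwin * (256KB - 128KB) / 128KB  ~= 535KB
 *   rcvwin += 2 * grow                         ~= 1605KB
 * i.e. the advertised window roughly triples, and is then converted to
 * a rcvbuf size via the per-MSS truesize estimate (rcvmem) and clamped
 * to tcp_rmem[2].
 */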
1948 
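/* Fold the receive memory bulk-freed by the recvmsg fast path back into
 * the socket accounting: __mptcp_recvmsg_mskq() clears the skb destructor
 * and only accumulates the truesize in msk->rmem_released, so that the
 * atomic updates happen here, with the msk data lock held.
 */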
1949 static void __mptcp_update_rmem(struct sock *sk)
1950 {
1951 	struct mptcp_sock *msk = mptcp_sk(sk);
1952 
1953 	if (!msk->rmem_released)
1954 		return;
1955 
1956 	atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
1957 	mptcp_rmem_uncharge(sk, msk->rmem_released);
1958 	WRITE_ONCE(msk->rmem_released, 0);
1959 }
1960 
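/* Move the skbs queued by the subflow RX path on the msk-level
 * sk_receive_queue (under the data lock) to msk->receive_queue, which is
 * consumed by recvmsg() under the msk socket lock.
 */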
1961 static void __mptcp_splice_receive_queue(struct sock *sk)
1962 {
1963 	struct mptcp_sock *msk = mptcp_sk(sk);
1964 
1965 	skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
1966 }
1967 
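/* Drain into the msk any data still pending at the subflow or
 * out-of-order queue level; returns true if the msk receive queue is
 * non-empty afterwards.
 */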
1968 static bool __mptcp_move_skbs(struct mptcp_sock *msk)
1969 {
1970 	struct sock *sk = (struct sock *)msk;
1971 	unsigned int moved = 0;
1972 	bool ret, done;
1973 
1974 	do {
1975 		struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1976 		bool slowpath;
1977 
1978 		/* we can have data pending in the subflows only if the msk
1979 		 * receive buffer was full at subflow_data_ready() time;
1980 		 * that is an unlikely slow path.
1981 		 */
1982 		if (likely(!ssk))
1983 			break;
1984 
1985 		slowpath = lock_sock_fast(ssk);
1986 		mptcp_data_lock(sk);
1987 		__mptcp_update_rmem(sk);
1988 		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1989 		mptcp_data_unlock(sk);
1990 
1991 		if (unlikely(ssk->sk_err))
1992 			__mptcp_error_report(sk);
1993 		unlock_sock_fast(ssk, slowpath);
1994 	} while (!done);
1995 
1996 	/* acquire the data lock only if some input data is pending */
1997 	ret = moved > 0;
1998 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
1999 	    !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
2000 		mptcp_data_lock(sk);
2001 		__mptcp_update_rmem(sk);
2002 		ret |= __mptcp_ofo_queue(msk);
2003 		__mptcp_splice_receive_queue(sk);
2004 		mptcp_data_unlock(sk);
2005 	}
2006 	if (ret)
2007 		mptcp_check_data_fin((struct sock *)msk);
2008 	return !skb_queue_empty(&msk->receive_queue);
2009 }
2010 
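/* Best-effort count of the bytes ready to read, for SIOCINQ/TCP_CM_INQ:
 * everything between the first unread mapping and the msk-level ack_seq.
 * After shutdown return 1 so that the reader sees EOF, similar to the
 * TCP-level tcp_inq_hint().
 */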
2011 static unsigned int mptcp_inq_hint(const struct sock *sk)
2012 {
2013 	const struct mptcp_sock *msk = mptcp_sk(sk);
2014 	const struct sk_buff *skb;
2015 
2016 	skb = skb_peek(&msk->receive_queue);
2017 	if (skb) {
2018 		u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
2019 
2020 		if (hint_val >= INT_MAX)
2021 			return INT_MAX;
2022 
2023 		return (unsigned int)hint_val;
2024 	}
2025 
2026 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
2027 		return 1;
2028 
2029 	return 0;
2030 }
2031 
2032 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2033 			 int flags, int *addr_len)
2034 {
2035 	struct mptcp_sock *msk = mptcp_sk(sk);
2036 	struct scm_timestamping_internal tss;
2037 	int copied = 0, cmsg_flags = 0;
2038 	int target;
2039 	long timeo;
2040 
2041 	/* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2042 	if (unlikely(flags & MSG_ERRQUEUE))
2043 		return inet_recv_error(sk, msg, len, addr_len);
2044 
2045 	lock_sock(sk);
2046 	if (unlikely(sk->sk_state == TCP_LISTEN)) {
2047 		copied = -ENOTCONN;
2048 		goto out_err;
2049 	}
2050 
2051 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2052 
2053 	len = min_t(size_t, len, INT_MAX);
2054 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2055 
2056 	if (unlikely(msk->recvmsg_inq))
2057 		cmsg_flags = MPTCP_CMSG_INQ;
2058 
2059 	while (copied < len) {
2060 		int bytes_read;
2061 
2062 		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
2063 		if (unlikely(bytes_read < 0)) {
2064 			if (!copied)
2065 				copied = bytes_read;
2066 			goto out_err;
2067 		}
2068 
2069 		copied += bytes_read;
2070 
2071 		/* be sure to advertise window change */
2072 		mptcp_cleanup_rbuf(msk);
2073 
2074 		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
2075 			continue;
2076 
2077 		/* only the master socket status is relevant here. The exit
2078 		 * conditions closely mirror those of tcp_recvmsg()
2079 		 */
2080 		if (copied >= target)
2081 			break;
2082 
2083 		if (copied) {
2084 			if (sk->sk_err ||
2085 			    sk->sk_state == TCP_CLOSE ||
2086 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2087 			    !timeo ||
2088 			    signal_pending(current))
2089 				break;
2090 		} else {
2091 			if (sk->sk_err) {
2092 				copied = sock_error(sk);
2093 				break;
2094 			}
2095 
2096 			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2097 				mptcp_check_for_eof(msk);
2098 
2099 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
2100 				/* race breaker: the shutdown could have happened after
2101 				 * the previous receive queue check
2102 				 */
2103 				if (__mptcp_move_skbs(msk))
2104 					continue;
2105 				break;
2106 			}
2107 
2108 			if (sk->sk_state == TCP_CLOSE) {
2109 				copied = -ENOTCONN;
2110 				break;
2111 			}
2112 
2113 			if (!timeo) {
2114 				copied = -EAGAIN;
2115 				break;
2116 			}
2117 
2118 			if (signal_pending(current)) {
2119 				copied = sock_intr_errno(timeo);
2120 				break;
2121 			}
2122 		}
2123 
2124 		pr_debug("block timeout %ld", timeo);
2125 		sk_wait_data(sk, &timeo, NULL);
2126 	}
2127 
2128 out_err:
2129 	if (cmsg_flags && copied >= 0) {
2130 		if (cmsg_flags & MPTCP_CMSG_TS)
2131 			tcp_recv_timestamp(msg, sk, &tss);
2132 
2133 		if (cmsg_flags & MPTCP_CMSG_INQ) {
2134 			unsigned int inq = mptcp_inq_hint(sk);
2135 
2136 			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2137 		}
2138 	}
2139 
2140 	pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
2141 		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
2142 		 skb_queue_empty(&msk->receive_queue), copied);
2143 	if (!(flags & MSG_PEEK))
2144 		mptcp_rcv_space_adjust(msk, copied);
2145 
2146 	release_sock(sk);
2147 	return copied;
2148 }
2149 
2150 static void mptcp_retransmit_timer(struct timer_list *t)
2151 {
2152 	struct inet_connection_sock *icsk = from_timer(icsk, t,
2153 						       icsk_retransmit_timer);
2154 	struct sock *sk = &icsk->icsk_inet.sk;
2155 	struct mptcp_sock *msk = mptcp_sk(sk);
2156 
2157 	bh_lock_sock(sk);
2158 	if (!sock_owned_by_user(sk)) {
2159 		/* we need a process context to retransmit */
2160 		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2161 			mptcp_schedule_work(sk);
2162 	} else {
2163 		/* delegate our work to tcp_release_cb() */
2164 		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2165 	}
2166 	bh_unlock_sock(sk);
2167 	sock_put(sk);
2168 }
2169 
2170 static void mptcp_timeout_timer(struct timer_list *t)
2171 {
2172 	struct sock *sk = from_timer(sk, t, sk_timer);
2173 
2174 	mptcp_schedule_work(sk);
2175 	sock_put(sk);
2176 }
2177 
2178 /* Find an idle subflow.  Return NULL if there is unacked data at tcp
2179  * level.
2180  *
2181  * A backup subflow is returned only if that is the only kind available.
2182  */
2183 static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2184 {
2185 	struct sock *backup = NULL, *pick = NULL;
2186 	struct mptcp_subflow_context *subflow;
2187 	int min_stale_count = INT_MAX;
2188 
2189 	sock_owned_by_me((const struct sock *)msk);
2190 
2191 	if (__mptcp_check_fallback(msk))
2192 		return NULL;
2193 
2194 	mptcp_for_each_subflow(msk, subflow) {
2195 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2196 
2197 		if (!__mptcp_subflow_active(subflow))
2198 			continue;
2199 
2200 		/* still data outstanding at TCP level? skip this */
2201 		if (!tcp_rtx_and_write_queues_empty(ssk)) {
2202 			mptcp_pm_subflow_chk_stale(msk, ssk);
2203 			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2204 			continue;
2205 		}
2206 
2207 		if (subflow->backup) {
2208 			if (!backup)
2209 				backup = ssk;
2210 			continue;
2211 		}
2212 
2213 		if (!pick)
2214 			pick = ssk;
2215 	}
2216 
2217 	if (pick)
2218 		return pick;
2219 
2220 	/* use a backup subflow only if there is no progress anywhere */
2221 	return min_stale_count > 1 ? backup : NULL;
2222 }
2223 
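/* Release the inode reference pinning the initial subflow socket and
 * clear msk->subflow, so that the conn_list walk in __mptcp_destroy_sock()
 * can dispose of the first subflow like any other.
 */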
2224 static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
2225 {
2226 	if (msk->subflow) {
2227 		iput(SOCK_INODE(msk->subflow));
2228 		msk->subflow = NULL;
2229 	}
2230 }
2231 
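/* Invoked when closing a subflow (MPTCP_CF_PUSH): if the mptcp-level rtx
 * queue is not empty, enter "recovery" mode, marking every previously
 * sent dfrag as unsent again and recording the current snd_nxt as the
 * recovery end point. Returns true if a recovery was started.
 */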
2232 bool __mptcp_retransmit_pending_data(struct sock *sk)
2233 {
2234 	struct mptcp_data_frag *cur, *rtx_head;
2235 	struct mptcp_sock *msk = mptcp_sk(sk);
2236 
2237 	if (__mptcp_check_fallback(mptcp_sk(sk)))
2238 		return false;
2239 
2240 	if (tcp_rtx_and_write_queues_empty(sk))
2241 		return false;
2242 
2243 	/* the closing socket has some data untransmitted and/or unacked:
2244 	 * some data in the mptcp rtx queue has not really been transmitted yet.
2245 	 * keep it simple and re-inject the whole mptcp-level rtx queue
2246 	 */
2247 	mptcp_data_lock(sk);
2248 	__mptcp_clean_una_wakeup(sk);
2249 	rtx_head = mptcp_rtx_head(sk);
2250 	if (!rtx_head) {
2251 		mptcp_data_unlock(sk);
2252 		return false;
2253 	}
2254 
2255 	msk->recovery_snd_nxt = msk->snd_nxt;
2256 	msk->recovery = true;
2257 	mptcp_data_unlock(sk);
2258 
2259 	msk->first_pending = rtx_head;
2260 	msk->snd_burst = 0;
2261 
2262 	/* be sure to clear the "sent status" on all re-injected fragments */
2263 	list_for_each_entry(cur, &msk->rtx_queue, list) {
2264 		if (!cur->already_sent)
2265 			break;
2266 		cur->already_sent = 0;
2267 	}
2268 
2269 	return true;
2270 }
2271 
2272 /* flags for __mptcp_close_ssk() */
2273 #define MPTCP_CF_PUSH		BIT(1)
2274 #define MPTCP_CF_FASTCLOSE	BIT(2)
2275 
2276 /* subflow sockets can be either outgoing (connect) or incoming
2277  * (accept).
2278  *
2279  * Outgoing subflows use in-kernel sockets.
2280  * Incoming subflows do not have their own 'struct socket' allocated,
2281  * so we need to use tcp_close() after detaching them from the mptcp
2282  * parent socket.
2283  */
2284 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2285 			      struct mptcp_subflow_context *subflow,
2286 			      unsigned int flags)
2287 {
2288 	struct mptcp_sock *msk = mptcp_sk(sk);
2289 	bool need_push, dispose_it;
2290 
2291 	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
2292 	if (dispose_it)
2293 		list_del(&subflow->node);
2294 
2295 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2296 
2297 	if (flags & MPTCP_CF_FASTCLOSE)
2298 		subflow->send_fastclose = 1;
2299 
2300 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2301 	if (!dispose_it) {
2302 		tcp_disconnect(ssk, 0);
2303 		msk->subflow->state = SS_UNCONNECTED;
2304 		mptcp_subflow_ctx_reset(subflow);
2305 		release_sock(ssk);
2306 
2307 		goto out;
2308 	}
2309 
2310 	/* if we are invoked by the msk cleanup code, the subflow is
2311 	 * already orphaned
2312 	 */
2313 	if (ssk->sk_socket)
2314 		sock_orphan(ssk);
2315 
2316 	subflow->disposable = 1;
2317 
2318 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops and
2319 	 * the ssk has already been destroyed; we just need to release the
2320 	 * reference owned by the msk
2321 	 */
2322 	if (!inet_csk(ssk)->icsk_ulp_ops) {
2323 		kfree_rcu(subflow, rcu);
2324 	} else {
2325 		/* otherwise tcp will dispose of the ssk and subflow ctx */
2326 		__tcp_close(ssk, 0);
2327 
2328 		/* close acquired an extra ref */
2329 		__sock_put(ssk);
2330 	}
2331 	release_sock(ssk);
2332 
2333 	sock_put(ssk);
2334 
2335 	if (ssk == msk->first)
2336 		msk->first = NULL;
2337 
2338 out:
2339 	if (ssk == msk->last_snd)
2340 		msk->last_snd = NULL;
2341 
2342 	if (need_push)
2343 		__mptcp_push_pending(sk, 0);
2344 }
2345 
2346 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2347 		     struct mptcp_subflow_context *subflow)
2348 {
2349 	if (sk->sk_state == TCP_ESTABLISHED)
2350 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2351 
2352 	/* subflow aborted before reaching the fully_established status;
2353 	 * attempt the creation of the next subflow
2354 	 */
2355 	mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow);
2356 
2357 	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2358 }
2359 
2360 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2361 {
2362 	return 0;
2363 }
2364 
2365 static void __mptcp_close_subflow(struct mptcp_sock *msk)
2366 {
2367 	struct mptcp_subflow_context *subflow, *tmp;
2368 
2369 	might_sleep();
2370 
2371 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2372 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2373 
2374 		if (inet_sk_state_load(ssk) != TCP_CLOSE)
2375 			continue;
2376 
2377 		/* 'subflow_data_ready' will re-sched once rx queue is empty */
2378 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2379 			continue;
2380 
2381 		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
2382 	}
2383 }
2384 
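/* True when the close grace period has elapsed - TCP_TIMEWAIT_LEN since
 * mptcp_close() stamped icsk_mtup.probe_timestamp - or when every
 * subflow already reached TCP_CLOSE, so there is nothing left to wait for.
 */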
2385 static bool mptcp_check_close_timeout(const struct sock *sk)
2386 {
2387 	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
2388 	struct mptcp_subflow_context *subflow;
2389 
2390 	if (delta >= TCP_TIMEWAIT_LEN)
2391 		return true;
2392 
2393 	/* if all subflows are in the closed state, don't bother with an
2394 	 * additional timeout
2395 	 */
2396 	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
2397 		if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
2398 		    TCP_CLOSE)
2399 			return false;
2400 	}
2401 	return true;
2402 }
2403 
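/* The peer sent an MP_FASTCLOSE: reset every subflow still alive, move
 * the msk straight to TCP_CLOSE and wake up any blocked caller.
 */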
2404 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2405 {
2406 	struct mptcp_subflow_context *subflow, *tmp;
2407 	struct sock *sk = &msk->sk.icsk_inet.sk;
2408 
2409 	if (likely(!READ_ONCE(msk->rcv_fastclose)))
2410 		return;
2411 
2412 	mptcp_token_destroy(msk);
2413 
2414 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2415 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2416 		bool slow;
2417 
2418 		slow = lock_sock_fast(tcp_sk);
2419 		if (tcp_sk->sk_state != TCP_CLOSE) {
2420 			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
2421 			tcp_set_state(tcp_sk, TCP_CLOSE);
2422 		}
2423 		unlock_sock_fast(tcp_sk, slow);
2424 	}
2425 
2426 	inet_sk_state_store(sk, TCP_CLOSE);
2427 	sk->sk_shutdown = SHUTDOWN_MASK;
2428 	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2429 	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2430 
2431 	mptcp_close_wake_up(sk);
2432 }
2433 
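/* MPTCP-level retransmission: re-send the head of the rtx queue on an
 * idle subflow, if any. Note that with DSS checksums enabled the whole
 * dfrag is re-sent (info.limit = data_len), presumably because the
 * checksum covers the full mapping; without checksums only the portion
 * already sent is re-injected.
 */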
2434 static void __mptcp_retrans(struct sock *sk)
2435 {
2436 	struct mptcp_sock *msk = mptcp_sk(sk);
2437 	struct mptcp_sendmsg_info info = {};
2438 	struct mptcp_data_frag *dfrag;
2439 	size_t copied = 0;
2440 	struct sock *ssk;
2441 	int ret;
2442 
2443 	mptcp_clean_una_wakeup(sk);
2444 
2445 	/* first check ssk: need to kick "stale" logic */
2446 	ssk = mptcp_subflow_get_retrans(msk);
2447 	dfrag = mptcp_rtx_head(sk);
2448 	if (!dfrag) {
2449 		if (mptcp_data_fin_enabled(msk)) {
2450 			struct inet_connection_sock *icsk = inet_csk(sk);
2451 
2452 			icsk->icsk_retransmits++;
2453 			mptcp_set_datafin_timeout(sk);
2454 			mptcp_send_ack(msk);
2455 
2456 			goto reset_timer;
2457 		}
2458 
2459 		if (!mptcp_send_head(sk))
2460 			return;
2461 
2462 		goto reset_timer;
2463 	}
2464 
2465 	if (!ssk)
2466 		goto reset_timer;
2467 
2468 	lock_sock(ssk);
2469 
2470 	/* limit retransmission to the bytes already sent on some subflows */
2471 	info.sent = 0;
2472 	info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
2473 	while (info.sent < info.limit) {
2474 		ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2475 		if (ret <= 0)
2476 			break;
2477 
2478 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2479 		copied += ret;
2480 		info.sent += ret;
2481 	}
2482 	if (copied) {
2483 		dfrag->already_sent = max(dfrag->already_sent, info.sent);
2484 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2485 			 info.size_goal);
2486 		WRITE_ONCE(msk->allow_infinite_fallback, false);
2487 	}
2488 
2489 	release_sock(ssk);
2490 
2491 reset_timer:
2492 	mptcp_check_and_set_pending(sk);
2493 
2494 	if (!mptcp_timer_pending(sk))
2495 		mptcp_reset_timer(sk);
2496 }
2497 
2498 static void mptcp_worker(struct work_struct *work)
2499 {
2500 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2501 	struct sock *sk = &msk->sk.icsk_inet.sk;
2502 	int state;
2503 
2504 	lock_sock(sk);
2505 	state = sk->sk_state;
2506 	if (unlikely(state == TCP_CLOSE))
2507 		goto unlock;
2508 
2509 	mptcp_check_data_fin_ack(sk);
2510 
2511 	mptcp_check_fastclose(msk);
2512 
2513 	mptcp_pm_nl_work(msk);
2514 
2515 	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2516 		mptcp_check_for_eof(msk);
2517 
2518 	__mptcp_check_send_data_fin(sk);
2519 	mptcp_check_data_fin(sk);
2520 
2521 	/* There is no point in keeping around an orphaned sk that has timed out
2522 	 * or been closed, but we need the msk around to reply to incoming DATA_FIN,
2523 	 * even if it is orphaned and in FIN_WAIT2 state
2524 	 */
2525 	if (sock_flag(sk, SOCK_DEAD) &&
2526 	    (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
2527 		inet_sk_state_store(sk, TCP_CLOSE);
2528 		__mptcp_destroy_sock(sk);
2529 		goto unlock;
2530 	}
2531 
2532 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2533 		__mptcp_close_subflow(msk);
2534 
2535 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2536 		__mptcp_retrans(sk);
2537 
2538 unlock:
2539 	release_sock(sk);
2540 	sock_put(sk);
2541 }
2542 
2543 static int __mptcp_init_sock(struct sock *sk)
2544 {
2545 	struct mptcp_sock *msk = mptcp_sk(sk);
2546 
2547 	INIT_LIST_HEAD(&msk->conn_list);
2548 	INIT_LIST_HEAD(&msk->join_list);
2549 	INIT_LIST_HEAD(&msk->rtx_queue);
2550 	INIT_WORK(&msk->work, mptcp_worker);
2551 	__skb_queue_head_init(&msk->receive_queue);
2552 	msk->out_of_order_queue = RB_ROOT;
2553 	msk->first_pending = NULL;
2554 	msk->rmem_fwd_alloc = 0;
2555 	WRITE_ONCE(msk->rmem_released, 0);
2556 	msk->timer_ival = TCP_RTO_MIN;
2557 
2558 	msk->first = NULL;
2559 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
2560 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2561 	WRITE_ONCE(msk->allow_infinite_fallback, true);
2562 	msk->recovery = false;
2563 
2564 	mptcp_pm_data_init(msk);
2565 
2566 	/* re-use the csk retrans timer for MPTCP-level retrans */
2567 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
2568 	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
2569 
2570 	return 0;
2571 }
2572 
2573 static void mptcp_ca_reset(struct sock *sk)
2574 {
2575 	struct inet_connection_sock *icsk = inet_csk(sk);
2576 
2577 	tcp_assign_congestion_control(sk);
2578 	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
2579 
2580 	/* no need to keep a reference to the ops, the name will suffice */
2581 	tcp_cleanup_congestion_control(sk);
2582 	icsk->icsk_ca_ops = NULL;
2583 }
2584 
2585 static int mptcp_init_sock(struct sock *sk)
2586 {
2587 	struct net *net = sock_net(sk);
2588 	int ret;
2589 
2590 	ret = __mptcp_init_sock(sk);
2591 	if (ret)
2592 		return ret;
2593 
2594 	if (!mptcp_is_enabled(net))
2595 		return -ENOPROTOOPT;
2596 
2597 	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
2598 		return -ENOMEM;
2599 
2600 	ret = __mptcp_socket_create(mptcp_sk(sk));
2601 	if (ret)
2602 		return ret;
2603 
2604 	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
2605 	 * propagate the correct value
2606 	 */
2607 	mptcp_ca_reset(sk);
2608 
2609 	sk_sockets_allocated_inc(sk);
2610 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
2611 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
2612 
2613 	return 0;
2614 }
2615 
2616 static void __mptcp_clear_xmit(struct sock *sk)
2617 {
2618 	struct mptcp_sock *msk = mptcp_sk(sk);
2619 	struct mptcp_data_frag *dtmp, *dfrag;
2620 
2621 	WRITE_ONCE(msk->first_pending, NULL);
2622 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
2623 		dfrag_clear(sk, dfrag);
2624 }
2625 
2626 static void mptcp_cancel_work(struct sock *sk)
2627 {
2628 	struct mptcp_sock *msk = mptcp_sk(sk);
2629 
2630 	if (cancel_work_sync(&msk->work))
2631 		__sock_put(sk);
2632 }
2633 
2634 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
2635 {
2636 	lock_sock(ssk);
2637 
2638 	switch (ssk->sk_state) {
2639 	case TCP_LISTEN:
2640 		if (!(how & RCV_SHUTDOWN))
2641 			break;
2642 		fallthrough;
2643 	case TCP_SYN_SENT:
2644 		tcp_disconnect(ssk, O_NONBLOCK);
2645 		break;
2646 	default:
2647 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
2648 			pr_debug("Fallback");
2649 			ssk->sk_shutdown |= how;
2650 			tcp_shutdown(ssk, how);
2651 		} else {
2652 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
2653 			tcp_send_ack(ssk);
2654 			if (!mptcp_timer_pending(sk))
2655 				mptcp_reset_timer(sk);
2656 		}
2657 		break;
2658 	}
2659 
2660 	release_sock(ssk);
2661 }
2662 
2663 static const unsigned char new_state[16] = {
2664 	/* current state:     new state:      action:	*/
2665 	[0 /* (Invalid) */] = TCP_CLOSE,
2666 	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2667 	[TCP_SYN_SENT]      = TCP_CLOSE,
2668 	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2669 	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
2670 	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
2671 	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
2672 	[TCP_CLOSE]         = TCP_CLOSE,
2673 	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
2674 	[TCP_LAST_ACK]      = TCP_LAST_ACK,
2675 	[TCP_LISTEN]        = TCP_CLOSE,
2676 	[TCP_CLOSING]       = TCP_CLOSING,
2677 	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
2678 };
2679 
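/* Move the msk to its next state on close()/shutdown(), per the
 * new_state[] table above, and tell the caller whether a DATA_FIN
 * (TCP_ACTION_FIN) needs to be sent; this mirrors tcp_close_state().
 */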
2680 static int mptcp_close_state(struct sock *sk)
2681 {
2682 	int next = (int)new_state[sk->sk_state];
2683 	int ns = next & TCP_STATE_MASK;
2684 
2685 	inet_sk_state_store(sk, ns);
2686 
2687 	return next & TCP_ACTION_FIN;
2688 }
2689 
2690 static void __mptcp_check_send_data_fin(struct sock *sk)
2691 {
2692 	struct mptcp_subflow_context *subflow;
2693 	struct mptcp_sock *msk = mptcp_sk(sk);
2694 
2695 	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
2696 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
2697 		 msk->snd_nxt, msk->write_seq);
2698 
2699 	/* there is still data to push to the subflows, or we are not really
2700 	 * shutting down yet: skip this
2701 	 */
2702 	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
2703 	    mptcp_send_head(sk))
2704 		return;
2705 
2706 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
2707 
2708 	/* a fallback socket will not get a data_fin/ack; we can move to the
2709 	 * next state now
2710 	 */
2711 	if (__mptcp_check_fallback(msk)) {
2712 		WRITE_ONCE(msk->snd_una, msk->write_seq);
2713 		if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
2714 			inet_sk_state_store(sk, TCP_CLOSE);
2715 			mptcp_close_wake_up(sk);
2716 		} else if (sk->sk_state == TCP_FIN_WAIT1) {
2717 			inet_sk_state_store(sk, TCP_FIN_WAIT2);
2718 		}
2719 	}
2720 
2721 	mptcp_for_each_subflow(msk, subflow) {
2722 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2723 
2724 		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
2725 	}
2726 }
2727 
2728 static void __mptcp_wr_shutdown(struct sock *sk)
2729 {
2730 	struct mptcp_sock *msk = mptcp_sk(sk);
2731 
2732 	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
2733 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
2734 		 !!mptcp_send_head(sk));
2735 
2736 	/* will be ignored by fallback sockets */
2737 	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2738 	WRITE_ONCE(msk->snd_data_fin_enable, 1);
2739 
2740 	__mptcp_check_send_data_fin(sk);
2741 }
2742 
2743 static void __mptcp_destroy_sock(struct sock *sk)
2744 {
2745 	struct mptcp_subflow_context *subflow, *tmp;
2746 	struct mptcp_sock *msk = mptcp_sk(sk);
2747 	LIST_HEAD(conn_list);
2748 
2749 	pr_debug("msk=%p", msk);
2750 
2751 	might_sleep();
2752 
2753 	/* the join list will eventually be flushed (with rst) at sock lock release time */
2754 	list_splice_init(&msk->conn_list, &conn_list);
2755 
2756 	mptcp_stop_timer(sk);
2757 	sk_stop_timer(sk, &sk->sk_timer);
2758 	msk->pm.status = 0;
2759 
2760 	/* clears msk->subflow, allowing the following loop to close
2761 	 * even the initial subflow
2762 	 */
2763 	mptcp_dispose_initial_subflow(msk);
2764 	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
2765 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2766 		__mptcp_close_ssk(sk, ssk, subflow, 0);
2767 	}
2768 
2769 	sk->sk_prot->destroy(sk);
2770 
2771 	WARN_ON_ONCE(msk->rmem_fwd_alloc);
2772 	WARN_ON_ONCE(msk->rmem_released);
2773 	sk_stream_kill_queues(sk);
2774 	xfrm_sk_free_policy(sk);
2775 
2776 	sk_refcnt_debug_release(sk);
2777 	sock_put(sk);
2778 }
2779 
2780 static void mptcp_close(struct sock *sk, long timeout)
2781 {
2782 	struct mptcp_subflow_context *subflow;
2783 	bool do_cancel_work = false;
2784 
2785 	lock_sock(sk);
2786 	sk->sk_shutdown = SHUTDOWN_MASK;
2787 
2788 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
2789 		inet_sk_state_store(sk, TCP_CLOSE);
2790 		goto cleanup;
2791 	}
2792 
2793 	if (mptcp_close_state(sk))
2794 		__mptcp_wr_shutdown(sk);
2795 
2796 	sk_stream_wait_close(sk, timeout);
2797 
2798 cleanup:
2799 	/* orphan all the subflows */
2800 	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
2801 	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
2802 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2803 		bool slow = lock_sock_fast_nested(ssk);
2804 
2805 		sock_orphan(ssk);
2806 		unlock_sock_fast(ssk, slow);
2807 	}
2808 	sock_orphan(sk);
2809 
2810 	sock_hold(sk);
2811 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
2812 	if (mptcp_sk(sk)->token)
2813 		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
2814 
2815 	if (sk->sk_state == TCP_CLOSE) {
2816 		__mptcp_destroy_sock(sk);
2817 		do_cancel_work = true;
2818 	} else {
2819 		sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
2820 	}
2821 	release_sock(sk);
2822 	if (do_cancel_work)
2823 		mptcp_cancel_work(sk);
2824 
2825 	sock_put(sk);
2826 }
2827 
2828 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
2829 {
2830 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2831 	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
2832 	struct ipv6_pinfo *msk6 = inet6_sk(msk);
2833 
2834 	msk->sk_v6_daddr = ssk->sk_v6_daddr;
2835 	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
2836 
2837 	if (msk6 && ssk6) {
2838 		msk6->saddr = ssk6->saddr;
2839 		msk6->flow_label = ssk6->flow_label;
2840 	}
2841 #endif
2842 
2843 	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
2844 	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
2845 	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
2846 	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
2847 	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
2848 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
2849 }
2850 
2851 static int mptcp_disconnect(struct sock *sk, int flags)
2852 {
2853 	struct mptcp_subflow_context *subflow;
2854 	struct mptcp_sock *msk = mptcp_sk(sk);
2855 
2856 	inet_sk_state_store(sk, TCP_CLOSE);
2857 
2858 	mptcp_for_each_subflow(msk, subflow) {
2859 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2860 
2861 		__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
2862 	}
2863 
2864 	mptcp_stop_timer(sk);
2865 	sk_stop_timer(sk, &sk->sk_timer);
2866 
2867 	if (mptcp_sk(sk)->token)
2868 		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
2869 
2870 	mptcp_destroy_common(msk);
2871 	msk->last_snd = NULL;
2872 	WRITE_ONCE(msk->flags, 0);
2873 	msk->cb_flags = 0;
2874 	msk->push_pending = 0;
2875 	msk->recovery = false;
2876 	msk->can_ack = false;
2877 	msk->fully_established = false;
2878 	msk->rcv_data_fin = false;
2879 	msk->snd_data_fin_enable = false;
2880 	msk->rcv_fastclose = false;
2881 	msk->use_64bit_ack = false;
2882 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2883 	mptcp_pm_data_reset(msk);
2884 	mptcp_ca_reset(sk);
2885 
2886 	sk->sk_shutdown = 0;
2887 	sk_error_report(sk);
2888 	return 0;
2889 }
2890 
2891 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2892 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
2893 {
2894 	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
2895 
2896 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
2897 }
2898 #endif
2899 
2900 struct sock *mptcp_sk_clone(const struct sock *sk,
2901 			    const struct mptcp_options_received *mp_opt,
2902 			    struct request_sock *req)
2903 {
2904 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
2905 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
2906 	struct mptcp_sock *msk;
2907 	u64 ack_seq;
2908 
2909 	if (!nsk)
2910 		return NULL;
2911 
2912 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2913 	if (nsk->sk_family == AF_INET6)
2914 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
2915 #endif
2916 
2917 	__mptcp_init_sock(nsk);
2918 
2919 	msk = mptcp_sk(nsk);
2920 	msk->local_key = subflow_req->local_key;
2921 	msk->token = subflow_req->token;
2922 	msk->subflow = NULL;
2923 	WRITE_ONCE(msk->fully_established, false);
2924 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
2925 		WRITE_ONCE(msk->csum_enabled, true);
2926 
2927 	msk->write_seq = subflow_req->idsn + 1;
2928 	msk->snd_nxt = msk->write_seq;
2929 	msk->snd_una = msk->write_seq;
2930 	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
2931 	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
2932 
2933 	if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
2934 		msk->can_ack = true;
2935 		msk->remote_key = mp_opt->sndr_key;
2936 		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
2937 		ack_seq++;
2938 		WRITE_ONCE(msk->ack_seq, ack_seq);
2939 		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
2940 	}
2941 
2942 	sock_reset_flag(nsk, SOCK_RCU_FREE);
2943 	/* will be fully established after successful MPC subflow creation */
2944 	inet_sk_state_store(nsk, TCP_SYN_RECV);
2945 
2946 	security_inet_csk_clone(nsk, req);
2947 	bh_unlock_sock(nsk);
2948 
2949 	/* keep a single reference */
2950 	__sock_put(nsk);
2951 	return nsk;
2952 }
2953 
2954 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2955 {
2956 	const struct tcp_sock *tp = tcp_sk(ssk);
2957 
2958 	msk->rcvq_space.copied = 0;
2959 	msk->rcvq_space.rtt_us = 0;
2960 
2961 	msk->rcvq_space.time = tp->tcp_mstamp;
2962 
2963 	/* initial rcv_space offering made to peer */
2964 	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2965 				      TCP_INIT_CWND * tp->advmss);
2966 	if (msk->rcvq_space.space == 0)
2967 		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2968 
2969 	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
2970 }
2971 
2972 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
2973 				 bool kern)
2974 {
2975 	struct mptcp_sock *msk = mptcp_sk(sk);
2976 	struct socket *listener;
2977 	struct sock *newsk;
2978 
2979 	listener = __mptcp_nmpc_socket(msk);
2980 	if (WARN_ON_ONCE(!listener)) {
2981 		*err = -EINVAL;
2982 		return NULL;
2983 	}
2984 
2985 	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
2986 	newsk = inet_csk_accept(listener->sk, flags, err, kern);
2987 	if (!newsk)
2988 		return NULL;
2989 
2990 	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
2991 	if (sk_is_mptcp(newsk)) {
2992 		struct mptcp_subflow_context *subflow;
2993 		struct sock *new_mptcp_sock;
2994 
2995 		subflow = mptcp_subflow_ctx(newsk);
2996 		new_mptcp_sock = subflow->conn;
2997 
2998 		/* is_mptcp should be false if subflow->conn is missing, see
2999 		 * subflow_syn_recv_sock()
3000 		 */
3001 		if (WARN_ON_ONCE(!new_mptcp_sock)) {
3002 			tcp_sk(newsk)->is_mptcp = 0;
3003 			goto out;
3004 		}
3005 
3006 		/* acquire the 2nd reference for the owning socket */
3007 		sock_hold(new_mptcp_sock);
3008 		newsk = new_mptcp_sock;
3009 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
3010 	} else {
3011 		MPTCP_INC_STATS(sock_net(sk),
3012 				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
3013 	}
3014 
3015 out:
3016 	newsk->sk_kern_sock = kern;
3017 	return newsk;
3018 }
3019 
3020 void mptcp_destroy_common(struct mptcp_sock *msk)
3021 {
3022 	struct sock *sk = (struct sock *)msk;
3023 
3024 	__mptcp_clear_xmit(sk);
3025 
3026 	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
3027 	mptcp_data_lock(sk);
3028 	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
3029 	__skb_queue_purge(&sk->sk_receive_queue);
3030 	skb_rbtree_purge(&msk->out_of_order_queue);
3031 	mptcp_data_unlock(sk);
3032 
3033 	/* move all the rx fwd alloc into sk_forward_alloc, so that the
3034 	 * sk_mem_reclaim_final in inet_sock_destruct() will dispose of it
3035 	 */
3036 	sk->sk_forward_alloc += msk->rmem_fwd_alloc;
3037 	msk->rmem_fwd_alloc = 0;
3038 	mptcp_token_destroy(msk);
3039 	mptcp_pm_free_anno_list(msk);
3040 }
3041 
3042 static void mptcp_destroy(struct sock *sk)
3043 {
3044 	struct mptcp_sock *msk = mptcp_sk(sk);
3045 
3046 	mptcp_destroy_common(msk);
3047 	sk_sockets_allocated_dec(sk);
3048 }
3049 
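/* Some mptcp-level data has been acked: clean the acked portion of the
 * rtx queue now if we own the msk socket, otherwise defer the cleanup to
 * the release callback via MPTCP_CLEAN_UNA.
 */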
3050 void __mptcp_data_acked(struct sock *sk)
3051 {
3052 	if (!sock_owned_by_user(sk))
3053 		__mptcp_clean_una(sk);
3054 	else
3055 		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3056 
3057 	if (mptcp_pending_data_fin_ack(sk))
3058 		mptcp_schedule_work(sk);
3059 }
3060 
3061 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3062 {
3063 	if (!mptcp_send_head(sk))
3064 		return;
3065 
3066 	if (!sock_owned_by_user(sk)) {
3067 		struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
3068 
3069 		if (xmit_ssk == ssk)
3070 			__mptcp_subflow_push_pending(sk, ssk);
3071 		else if (xmit_ssk)
3072 			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
3073 	} else {
3074 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3075 	}
3076 }
3077 
3078 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3079 				      BIT(MPTCP_RETRANSMIT) | \
3080 				      BIT(MPTCP_FLUSH_JOIN_LIST))
3081 
3082 /* processes deferred events and flushes wmem */
3083 static void mptcp_release_cb(struct sock *sk)
3084 	__must_hold(&sk->sk_lock.slock)
3085 {
3086 	struct mptcp_sock *msk = mptcp_sk(sk);
3087 
3088 	for (;;) {
3089 		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
3090 				      msk->push_pending;
3091 		if (!flags)
3092 			break;
3093 
3094 		/* the following actions acquire the subflow socket lock
3095 		 *
3096 		 * 1) can't be invoked in atomic scope
3097 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3098 		 *    datapath acquires the msk socket spinlock while holding
3099 		 *    the subflow socket lock
3100 		 */
3101 		msk->push_pending = 0;
3102 		msk->cb_flags &= ~flags;
3103 		spin_unlock_bh(&sk->sk_lock.slock);
3104 		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3105 			__mptcp_flush_join_list(sk);
3106 		if (flags & BIT(MPTCP_PUSH_PENDING))
3107 			__mptcp_push_pending(sk, 0);
3108 		if (flags & BIT(MPTCP_RETRANSMIT))
3109 			__mptcp_retrans(sk);
3110 
3111 		cond_resched();
3112 		spin_lock_bh(&sk->sk_lock.slock);
3113 	}
3114 
3115 	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3116 		__mptcp_clean_una_wakeup(sk);
3117 	if (unlikely(msk->cb_flags)) {
3118 		/* be sure to set the current sk state before taking actions
3119 		 * depending on sk_state, that is before processing MPTCP_ERROR_REPORT
3120 		 */
3121 		if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
3122 			__mptcp_set_connected(sk);
3123 		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3124 			__mptcp_error_report(sk);
3125 		if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
3126 			msk->last_snd = NULL;
3127 	}
3128 
3129 	__mptcp_update_rmem(sk);
3130 }
3131 
3132 /* An MP_JOIN client subflow must wait for the 4th ack before sending any data:
3133  * TCP can't schedule the delack timer before the subflow is fully established.
3134  * MPTCP uses the delack timer to do 3rd-ack retransmissions
3135  */
3136 static void schedule_3rdack_retransmission(struct sock *ssk)
3137 {
3138 	struct inet_connection_sock *icsk = inet_csk(ssk);
3139 	struct tcp_sock *tp = tcp_sk(ssk);
3140 	unsigned long timeout;
3141 
3142 	if (mptcp_subflow_ctx(ssk)->fully_established)
3143 		return;
3144 
3145 	/* reschedule with a timeout above the RTT, as we only need to detect drops */
3146 	if (tp->srtt_us)
3147 		timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3148 	else
3149 		timeout = TCP_TIMEOUT_INIT;
3150 	timeout += jiffies;
3151 
3152 	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3153 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3154 	icsk->icsk_ack.timeout = timeout;
3155 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3156 }
3157 
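/* Run the actions delegated to this subflow - data push and/or 3rd-ack
 * retransmission - either from mptcp_napi_poll() or from the subflow
 * release callback, on the same CPU that scheduled the delegation.
 */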
3158 void mptcp_subflow_process_delegated(struct sock *ssk)
3159 {
3160 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3161 	struct sock *sk = subflow->conn;
3162 
3163 	if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
3164 		mptcp_data_lock(sk);
3165 		if (!sock_owned_by_user(sk))
3166 			__mptcp_subflow_push_pending(sk, ssk);
3167 		else
3168 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3169 		mptcp_data_unlock(sk);
3170 		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
3171 	}
3172 	if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
3173 		schedule_3rdack_retransmission(ssk);
3174 		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
3175 	}
3176 }
3177 
3178 static int mptcp_hash(struct sock *sk)
3179 {
3180 	/* should never be called,
3181 	 * we hash the TCP subflows, not the master socket
3182 	 */
3183 	WARN_ON_ONCE(1);
3184 	return 0;
3185 }
3186 
3187 static void mptcp_unhash(struct sock *sk)
3188 {
3189 	/* called from sk_common_release(), but nothing to do here */
3190 }
3191 
3192 static int mptcp_get_port(struct sock *sk, unsigned short snum)
3193 {
3194 	struct mptcp_sock *msk = mptcp_sk(sk);
3195 	struct socket *ssock;
3196 
3197 	ssock = __mptcp_nmpc_socket(msk);
3198 	pr_debug("msk=%p, subflow=%p", msk, ssock);
3199 	if (WARN_ON_ONCE(!ssock))
3200 		return -EINVAL;
3201 
3202 	return inet_csk_get_port(ssock->sk, snum);
3203 }
3204 
3205 void mptcp_finish_connect(struct sock *ssk)
3206 {
3207 	struct mptcp_subflow_context *subflow;
3208 	struct mptcp_sock *msk;
3209 	struct sock *sk;
3210 	u64 ack_seq;
3211 
3212 	subflow = mptcp_subflow_ctx(ssk);
3213 	sk = subflow->conn;
3214 	msk = mptcp_sk(sk);
3215 
3216 	pr_debug("msk=%p, token=%u", sk, subflow->token);
3217 
3218 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
3219 	ack_seq++;
3220 	subflow->map_seq = ack_seq;
3221 	subflow->map_subflow_seq = 1;
3222 
3223 	/* the socket is not connected yet, so no msk/subflow ops can access or
3224 	 * race on the fields below
3225 	 */
3226 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
3227 	WRITE_ONCE(msk->local_key, subflow->local_key);
3228 	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
3229 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3230 	WRITE_ONCE(msk->ack_seq, ack_seq);
3231 	WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
3232 	WRITE_ONCE(msk->can_ack, 1);
3233 	WRITE_ONCE(msk->snd_una, msk->write_seq);
3234 
3235 	mptcp_pm_new_connection(msk, ssk, 0);
3236 
3237 	mptcp_rcv_space_init(msk, ssk);
3238 }
3239 
3240 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3241 {
3242 	write_lock_bh(&sk->sk_callback_lock);
3243 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
3244 	sk_set_socket(sk, parent);
3245 	sk->sk_uid = SOCK_INODE(parent)->i_uid;
3246 	write_unlock_bh(&sk->sk_callback_lock);
3247 }
3248 
3249 bool mptcp_finish_join(struct sock *ssk)
3250 {
3251 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3252 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3253 	struct sock *parent = (void *)msk;
3254 	bool ret = true;
3255 
3256 	pr_debug("msk=%p, subflow=%p", msk, subflow);
3257 
3258 	/* mptcp socket already closing? */
3259 	if (!mptcp_is_fully_established(parent)) {
3260 		subflow->reset_reason = MPTCP_RST_EMPTCP;
3261 		return false;
3262 	}
3263 
3264 	if (!msk->pm.server_side)
3265 		goto out;
3266 
3267 	if (!mptcp_pm_allow_new_subflow(msk))
3268 		goto err_prohibited;
3269 
3270 	if (WARN_ON_ONCE(!list_empty(&subflow->node)))
3271 		goto err_prohibited;
3272 
3273 	/* active connections are already on conn_list.
3274 	 * If we can't acquire the msk socket lock here, let the release callback
3275 	 * handle it
3276 	 */
3277 	mptcp_data_lock(parent);
3278 	if (!sock_owned_by_user(parent)) {
3279 		ret = __mptcp_finish_join(msk, ssk);
3280 		if (ret) {
3281 			sock_hold(ssk);
3282 			list_add_tail(&subflow->node, &msk->conn_list);
3283 		}
3284 	} else {
3285 		sock_hold(ssk);
3286 		list_add_tail(&subflow->node, &msk->join_list);
3287 		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3288 	}
3289 	mptcp_data_unlock(parent);
3290 
3291 	if (!ret) {
3292 err_prohibited:
3293 		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3294 		return false;
3295 	}
3296 
3297 	subflow->map_seq = READ_ONCE(msk->ack_seq);
3298 	WRITE_ONCE(msk->allow_infinite_fallback, false);
3299 
3300 out:
3301 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
3302 	return true;
3303 }
3304 
3305 static void mptcp_shutdown(struct sock *sk, int how)
3306 {
3307 	pr_debug("sk=%p, how=%d", sk, how);
3308 
3309 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3310 		__mptcp_wr_shutdown(sk);
3311 }
3312 
3313 static int mptcp_forward_alloc_get(const struct sock *sk)
3314 {
3315 	return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
3316 }
3317 
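/* SIOCOUTQ/SIOCOUTQNSD helper: report the amount of data not yet acked
 * (v == snd_una) or not yet sent (v == snd_nxt) at the mptcp level, plus
 * whatever a fallback socket still holds at the plain TCP level.
 */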
3318 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3319 {
3320 	const struct sock *sk = (void *)msk;
3321 	u64 delta;
3322 
3323 	if (sk->sk_state == TCP_LISTEN)
3324 		return -EINVAL;
3325 
3326 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
3327 		return 0;
3328 
3329 	delta = msk->write_seq - v;
3330 	if (__mptcp_check_fallback(msk) && msk->first) {
3331 		struct tcp_sock *tp = tcp_sk(msk->first);
3332 
3333 		/* the first subflow is disconnected after close - see
3334 		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq,
3335 		 * so ignore that status, too.
3336 		 */
3337 		if (!((1 << msk->first->sk_state) &
3338 		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
3339 			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
3340 	}
3341 	if (delta > INT_MAX)
3342 		delta = INT_MAX;
3343 
3344 	return (int)delta;
3345 }
3346 
3347 static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3348 {
3349 	struct mptcp_sock *msk = mptcp_sk(sk);
3350 	bool slow;
3351 	int answ;
3352 
3353 	switch (cmd) {
3354 	case SIOCINQ:
3355 		if (sk->sk_state == TCP_LISTEN)
3356 			return -EINVAL;
3357 
3358 		lock_sock(sk);
3359 		__mptcp_move_skbs(msk);
3360 		answ = mptcp_inq_hint(sk);
3361 		release_sock(sk);
3362 		break;
3363 	case SIOCOUTQ:
3364 		slow = lock_sock_fast(sk);
3365 		answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3366 		unlock_sock_fast(sk, slow);
3367 		break;
3368 	case SIOCOUTQNSD:
3369 		slow = lock_sock_fast(sk);
3370 		answ = mptcp_ioctl_outq(msk, msk->snd_nxt);
3371 		unlock_sock_fast(sk, slow);
3372 		break;
3373 	default:
3374 		return -ENOIOCTLCMD;
3375 	}
3376 
3377 	return put_user(answ, (int __user *)arg);
3378 }
3379 
3380 static struct proto mptcp_prot = {
3381 	.name		= "MPTCP",
3382 	.owner		= THIS_MODULE,
3383 	.init		= mptcp_init_sock,
3384 	.disconnect	= mptcp_disconnect,
3385 	.close		= mptcp_close,
3386 	.accept		= mptcp_accept,
3387 	.setsockopt	= mptcp_setsockopt,
3388 	.getsockopt	= mptcp_getsockopt,
3389 	.shutdown	= mptcp_shutdown,
3390 	.destroy	= mptcp_destroy,
3391 	.sendmsg	= mptcp_sendmsg,
3392 	.ioctl		= mptcp_ioctl,
3393 	.recvmsg	= mptcp_recvmsg,
3394 	.release_cb	= mptcp_release_cb,
3395 	.hash		= mptcp_hash,
3396 	.unhash		= mptcp_unhash,
3397 	.get_port	= mptcp_get_port,
3398 	.forward_alloc_get	= mptcp_forward_alloc_get,
3399 	.sockets_allocated	= &mptcp_sockets_allocated,
3400 	.memory_allocated	= &tcp_memory_allocated,
3401 	.memory_pressure	= &tcp_memory_pressure,
3402 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
3403 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
3404 	.sysctl_mem	= sysctl_tcp_mem,
3405 	.obj_size	= sizeof(struct mptcp_sock),
3406 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
3407 	.no_autobind	= true,
3408 };
3409 
3410 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3411 {
3412 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3413 	struct socket *ssock;
3414 	int err;
3415 
3416 	lock_sock(sock->sk);
3417 	ssock = __mptcp_nmpc_socket(msk);
3418 	if (!ssock) {
3419 		err = -EINVAL;
3420 		goto unlock;
3421 	}
3422 
3423 	err = ssock->ops->bind(ssock, uaddr, addr_len);
3424 	if (!err)
3425 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3426 
3427 unlock:
3428 	release_sock(sock->sk);
3429 	return err;
3430 }
3431 
3432 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
3433 					 struct mptcp_subflow_context *subflow)
3434 {
3435 	subflow->request_mptcp = 0;
3436 	__mptcp_do_fallback(msk);
3437 }
3438 
3439 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
3440 				int addr_len, int flags)
3441 {
3442 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3443 	struct mptcp_subflow_context *subflow;
3444 	struct socket *ssock;
3445 	int err = -EINVAL;
3446 
3447 	lock_sock(sock->sk);
3448 	if (uaddr) {
3449 		if (addr_len < sizeof(uaddr->sa_family))
3450 			goto unlock;
3451 
3452 		if (uaddr->sa_family == AF_UNSPEC) {
3453 			err = mptcp_disconnect(sock->sk, flags);
3454 			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
3455 			goto unlock;
3456 		}
3457 	}
3458 
3459 	if (sock->state != SS_UNCONNECTED && msk->subflow) {
3460 		/* pending connection or invalid state, let existing subflow
3461 		 * cope with that
3462 		 */
3463 		ssock = msk->subflow;
3464 		goto do_connect;
3465 	}
3466 
3467 	ssock = __mptcp_nmpc_socket(msk);
3468 	if (!ssock)
3469 		goto unlock;
3470 
3471 	mptcp_token_destroy(msk);
3472 	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
3473 	subflow = mptcp_subflow_ctx(ssock->sk);
3474 #ifdef CONFIG_TCP_MD5SIG
3475 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3476 	 * TCP option space.
3477 	 */
3478 	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
3479 		mptcp_subflow_early_fallback(msk, subflow);
3480 #endif
3481 	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
3482 		MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
3483 		mptcp_subflow_early_fallback(msk, subflow);
3484 	}
3485 	if (likely(!__mptcp_check_fallback(msk)))
3486 		MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);
3487 
3488 do_connect:
3489 	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
3490 	sock->state = ssock->state;
3491 
3492 	/* on successful connect, the msk state will be moved to established by
3493 	 * subflow_finish_connect()
3494 	 */
3495 	if (!err || err == -EINPROGRESS)
3496 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3497 	else
3498 		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3499 
3500 unlock:
3501 	release_sock(sock->sk);
3502 	return err;
3503 }
3504 
3505 static int mptcp_listen(struct socket *sock, int backlog)
3506 {
3507 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3508 	struct socket *ssock;
3509 	int err;
3510 
3511 	pr_debug("msk=%p", msk);
3512 
3513 	lock_sock(sock->sk);
3514 	ssock = __mptcp_nmpc_socket(msk);
3515 	if (!ssock) {
3516 		err = -EINVAL;
3517 		goto unlock;
3518 	}
3519 
3520 	mptcp_token_destroy(msk);
3521 	inet_sk_state_store(sock->sk, TCP_LISTEN);
3522 	sock_set_flag(sock->sk, SOCK_RCU_FREE);
3523 
3524 	err = ssock->ops->listen(ssock, backlog);
3525 	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3526 	if (!err)
3527 		mptcp_copy_inaddrs(sock->sk, ssock->sk);
3528 
3529 unlock:
3530 	release_sock(sock->sk);
3531 	return err;
3532 }
3533 
3534 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
3535 			       int flags, bool kern)
3536 {
3537 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
3538 	struct socket *ssock;
3539 	int err;
3540 
3541 	pr_debug("msk=%p", msk);
3542 
3543 	ssock = __mptcp_nmpc_socket(msk);
3544 	if (!ssock)
3545 		return -EINVAL;
3546 
3547 	err = ssock->ops->accept(sock, newsock, flags, kern);
3548 	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
3549 		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
3550 		struct mptcp_subflow_context *subflow;
3551 		struct sock *newsk = newsock->sk;
3552 
3553 		lock_sock(newsk);
3554 
3555 		/* PM/worker can now acquire the first subflow socket
3556 		 * lock without racing with listener queue cleanup,
3557 		 * we can notify it, if needed.
3558 		 *
3559 		 * Even if remote has reset the initial subflow by now
3560 		 * the refcnt is still at least one.
3561 		 */
3562 		subflow = mptcp_subflow_ctx(msk->first);
3563 		list_add(&subflow->node, &msk->conn_list);
3564 		sock_hold(msk->first);
3565 		if (mptcp_is_fully_established(newsk))
3566 			mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
3567 
3568 		mptcp_copy_inaddrs(newsk, msk->first);
3569 		mptcp_rcv_space_init(msk, msk->first);
3570 		mptcp_propagate_sndbuf(newsk, msk->first);
3571 
3572 		/* set ssk->sk_socket of accept()ed flows to the mptcp socket.
3573 		 * This is needed so the NOSPACE flag can be set from the tcp stack.
3574 		 */
3575 		mptcp_for_each_subflow(msk, subflow) {
3576 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3577 
3578 			if (!ssk->sk_socket)
3579 				mptcp_sock_graft(ssk, newsock);
3580 		}
3581 		release_sock(newsk);
3582 	}
3583 
3584 	return err;
3585 }
3586 
3587 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
3588 {
3589 	/* Concurrent splices from sk_receive_queue into receive_queue will
3590 	 * always show at least one non-empty queue when checked in this order.
3591 	 */
3592 	if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
3593 	    skb_queue_empty_lockless(&msk->receive_queue))
3594 		return 0;
3595 
3596 	return EPOLLIN | EPOLLRDNORM;
3597 }
3598 
3599 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
3600 {
3601 	struct sock *sk = (struct sock *)msk;
3602 
3603 	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
3604 		return EPOLLOUT | EPOLLWRNORM;
3605 
3606 	if (sk_stream_is_writeable(sk))
3607 		return EPOLLOUT | EPOLLWRNORM;
3608 
3609 	mptcp_set_nospace(sk);
3610 	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
3611 	if (sk_stream_is_writeable(sk))
3612 		return EPOLLOUT | EPOLLWRNORM;
3613 
3614 	return 0;
3615 }
3616 
3617 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
3618 			   struct poll_table_struct *wait)
3619 {
3620 	struct sock *sk = sock->sk;
3621 	struct mptcp_sock *msk;
3622 	__poll_t mask = 0;
3623 	int state;
3624 
3625 	msk = mptcp_sk(sk);
3626 	sock_poll_wait(file, sock, wait);
3627 
3628 	state = inet_sk_state_load(sk);
3629 	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
3630 	if (state == TCP_LISTEN) {
3631 		if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk))
3632 			return 0;
3633 
3634 		return inet_csk_listen_poll(msk->subflow->sk);
3635 	}
3636 
3637 	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
3638 		mask |= mptcp_check_readable(msk);
3639 		mask |= mptcp_check_writeable(msk);
3640 	}
3641 	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
3642 		mask |= EPOLLHUP;
3643 	if (sk->sk_shutdown & RCV_SHUTDOWN)
3644 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
3645 
3646 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
3647 	smp_rmb();
3648 	if (sk->sk_err)
3649 		mask |= EPOLLERR;
3650 
3651 	return mask;
3652 }
3653 
3654 static const struct proto_ops mptcp_stream_ops = {
3655 	.family		   = PF_INET,
3656 	.owner		   = THIS_MODULE,
3657 	.release	   = inet_release,
3658 	.bind		   = mptcp_bind,
3659 	.connect	   = mptcp_stream_connect,
3660 	.socketpair	   = sock_no_socketpair,
3661 	.accept		   = mptcp_stream_accept,
3662 	.getname	   = inet_getname,
3663 	.poll		   = mptcp_poll,
3664 	.ioctl		   = inet_ioctl,
3665 	.gettstamp	   = sock_gettstamp,
3666 	.listen		   = mptcp_listen,
3667 	.shutdown	   = inet_shutdown,
3668 	.setsockopt	   = sock_common_setsockopt,
3669 	.getsockopt	   = sock_common_getsockopt,
3670 	.sendmsg	   = inet_sendmsg,
3671 	.recvmsg	   = inet_recvmsg,
3672 	.mmap		   = sock_no_mmap,
3673 	.sendpage	   = inet_sendpage,
3674 };
3675 
3676 static struct inet_protosw mptcp_protosw = {
3677 	.type		= SOCK_STREAM,
3678 	.protocol	= IPPROTO_MPTCP,
3679 	.prot		= &mptcp_prot,
3680 	.ops		= &mptcp_stream_ops,
3681 	.flags		= INET_PROTOSW_ICSK,
3682 };
3683 
3684 static int mptcp_napi_poll(struct napi_struct *napi, int budget)
3685 {
3686 	struct mptcp_delegated_action *delegated;
3687 	struct mptcp_subflow_context *subflow;
3688 	int work_done = 0;
3689 
3690 	delegated = container_of(napi, struct mptcp_delegated_action, napi);
3691 	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
3692 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3693 
3694 		bh_lock_sock_nested(ssk);
3695 		if (!sock_owned_by_user(ssk) &&
3696 		    mptcp_subflow_has_delegated_action(subflow))
3697 			mptcp_subflow_process_delegated(ssk);
3698 		/* ... elsewhere tcp_release_cb_override already processed
3699 		 * the action or will do so at the next release_sock().
3700 		 * In both cases we must dequeue the subflow here - on the same
3701 		 * CPU that scheduled it.
3702 		 */
3703 		bh_unlock_sock(ssk);
3704 		sock_put(ssk);
3705 
3706 		if (++work_done == budget)
3707 			return budget;
3708 	}
3709 
3710 	/* always provide a 0 'work_done' argument, so that napi_complete_done
3711 	 * will not try accessing the NULL napi->dev ptr
3712 	 */
3713 	napi_complete_done(napi, 0);
3714 	return work_done;
3715 }
3716 
3717 void __init mptcp_proto_init(void)
3718 {
3719 	struct mptcp_delegated_action *delegated;
3720 	int cpu;
3721 
3722 	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
3723 
3724 	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
3725 		panic("Failed to allocate MPTCP pcpu counter\n");
3726 
3727 	init_dummy_netdev(&mptcp_napi_dev);
3728 	for_each_possible_cpu(cpu) {
3729 		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
3730 		INIT_LIST_HEAD(&delegated->head);
3731 		netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll,
3732 				  NAPI_POLL_WEIGHT);
3733 		napi_enable(&delegated->napi);
3734 	}
3735 
3736 	mptcp_subflow_init();
3737 	mptcp_pm_init();
3738 	mptcp_token_init();
3739 
3740 	if (proto_register(&mptcp_prot, 1) != 0)
3741 		panic("Failed to register MPTCP proto.\n");
3742 
3743 	inet_register_protosw(&mptcp_protosw);
3744 
3745 	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
3746 }
3747 
3748 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3749 static const struct proto_ops mptcp_v6_stream_ops = {
3750 	.family		   = PF_INET6,
3751 	.owner		   = THIS_MODULE,
3752 	.release	   = inet6_release,
3753 	.bind		   = mptcp_bind,
3754 	.connect	   = mptcp_stream_connect,
3755 	.socketpair	   = sock_no_socketpair,
3756 	.accept		   = mptcp_stream_accept,
3757 	.getname	   = inet6_getname,
3758 	.poll		   = mptcp_poll,
3759 	.ioctl		   = inet6_ioctl,
3760 	.gettstamp	   = sock_gettstamp,
3761 	.listen		   = mptcp_listen,
3762 	.shutdown	   = inet_shutdown,
3763 	.setsockopt	   = sock_common_setsockopt,
3764 	.getsockopt	   = sock_common_getsockopt,
3765 	.sendmsg	   = inet6_sendmsg,
3766 	.recvmsg	   = inet6_recvmsg,
3767 	.mmap		   = sock_no_mmap,
3768 	.sendpage	   = inet_sendpage,
3769 #ifdef CONFIG_COMPAT
3770 	.compat_ioctl	   = inet6_compat_ioctl,
3771 #endif
3772 };
3773 
3774 static struct proto mptcp_v6_prot;
3775 
3776 static void mptcp_v6_destroy(struct sock *sk)
3777 {
3778 	mptcp_destroy(sk);
3779 	inet6_destroy_sock(sk);
3780 }
3781 
3782 static struct inet_protosw mptcp_v6_protosw = {
3783 	.type		= SOCK_STREAM,
3784 	.protocol	= IPPROTO_MPTCP,
3785 	.prot		= &mptcp_v6_prot,
3786 	.ops		= &mptcp_v6_stream_ops,
3787 	.flags		= INET_PROTOSW_ICSK,
3788 };
3789 
3790 int __init mptcp_proto_v6_init(void)
3791 {
3792 	int err;
3793 
3794 	mptcp_v6_prot = mptcp_prot;
3795 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
3796 	mptcp_v6_prot.slab = NULL;
3797 	mptcp_v6_prot.destroy = mptcp_v6_destroy;
3798 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
3799 
3800 	err = proto_register(&mptcp_v6_prot, 1);
3801 	if (err)
3802 		return err;
3803 
3804 	err = inet6_register_protosw(&mptcp_v6_protosw);
3805 	if (err)
3806 		proto_unregister(&mptcp_v6_prot);
3807 
3808 	return err;
3809 }
3810 #endif
3811