net/mptcp/protocol.c: diff of b1aa3db2c13ec0c63a73bd8fc5dfbfb112e3ff56 vs 190f8b060ee38fcea885e08b2fe0e3fdd428a618 ('-' = old, '+' = new, unified view)
 // SPDX-License-Identifier: GPL-2.0
 /* Multipath TCP
  *
  * Copyright (c) 2017 - 2019, Intel Corporation.
  */

 #define pr_fmt(fmt) "MPTCP: " fmt

 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/atomic.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
+#include <net/tcp_states.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/transp_v6.h>
 #endif
 #include <net/mptcp.h>
 #include "protocol.h"
 #include "mib.h"

 #define MPTCP_SAME_STATE TCP_MAX_STATES

--- 20 unchanged lines hidden ---

 static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
 {
 	if (!msk->subflow || READ_ONCE(msk->can_ack))
 		return NULL;

 	return msk->subflow;
 }

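[annotation, not part of the patch] __mptcp_nmpc_socket() returns the initial subflow socket only while the MPTCP handshake has not completed (msk->can_ack still unset); pre-connect socket operations are delegated to it. The calling pattern, as used by mptcp_bind() later in this diff:

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;	/* connection already set up: too late */
		goto unlock;
	}
	err = ssock->ops->bind(ssock, uaddr, addr_len);
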
-static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
-{
-	return msk->first && !sk_is_mptcp(msk->first);
-}
-
-static struct socket *mptcp_is_tcpsk(struct sock *sk)
+static bool mptcp_is_tcpsk(struct sock *sk)
 {
 	struct socket *sock = sk->sk_socket;

-	if (sock->sk != sk)
-		return NULL;
-
 	if (unlikely(sk->sk_prot == &tcp_prot)) {
 		/* we are being invoked after mptcp_accept() has
 		 * accepted a non-mp-capable flow: sk is a tcp_sk,
 		 * not an mptcp one.
 		 *
 		 * Hand the socket over to tcp so all further socket ops
 		 * bypass mptcp.
 		 */
 		sock->ops = &inet_stream_ops;
-		return sock;
+		return true;
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
 		sock->ops = &inet6_stream_ops;
-		return sock;
+		return true;
 #endif
 	}

-	return NULL;
+	return false;
 }

-static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
+static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
-	struct socket *sock;
-
 	sock_owned_by_me((const struct sock *)msk);

-	sock = mptcp_is_tcpsk((struct sock *)msk);
-	if (unlikely(sock))
-		return sock;
-
-	if (likely(!__mptcp_needs_tcp_fallback(msk)))
+	if (likely(!__mptcp_check_fallback(msk)))
 		return NULL;

-	return msk->subflow;
+	return msk->first;
 }

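[annotation, not part of the patch] The old code inferred fallback from the first subflow not being MP-capable; the new code tests an explicit per-connection flag. A sketch of what __mptcp_check_fallback() amounts to in this series (an assumption based on the companion protocol.h, shown only for orientation):

static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
	/* assumed: set once via __mptcp_do_fallback(), never cleared */
	return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
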
-static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
-{
-	return !msk->first;
-}
-
-static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
+static int __mptcp_socket_create(struct mptcp_sock *msk)
 {
 	struct mptcp_subflow_context *subflow;
 	struct sock *sk = (struct sock *)msk;
 	struct socket *ssock;
 	int err;

-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock))
-		return ssock;
-
-	ssock = __mptcp_nmpc_socket(msk);
-	if (ssock)
-		goto set_state;
-
-	if (!__mptcp_can_create_subflow(msk))
-		return ERR_PTR(-EINVAL);
-
 	err = mptcp_subflow_create_socket(sk, &ssock);
 	if (err)
-		return ERR_PTR(err);
+		return err;

 	msk->first = ssock->sk;
 	msk->subflow = ssock;
 	subflow = mptcp_subflow_ctx(ssock->sk);
 	list_add(&subflow->node, &msk->conn_list);
 	subflow->request_mptcp = 1;

-set_state:
-	if (state != MPTCP_SAME_STATE)
-		inet_sk_state_store(sk, state);
-	return ssock;
+	/* accept() will wait on first subflow sk_wq, and we always wake up
+	 * via msk->sk_socket
+	 */
+	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
+
+	return 0;
 }

 static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			      struct sk_buff *skb,
			      unsigned int offset, size_t copy_len)
 {
 	struct sock *sk = (struct sock *)msk;
 	struct sk_buff *tail;

--- 17 unchanged lines hidden ---

 		}
 	}

 	skb_set_owner_r(skb, sk);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	MPTCP_SKB_CB(skb)->offset = offset;
 }

+static void mptcp_stop_timer(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+	mptcp_sk(sk)->timer_ival = 0;
+}
+
 /* both sockets must be locked */
 static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				     struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

 	/* revalidate data sequence number.

--- 5 unchanged lines hidden ---

 	 */
 	if (likely(dsn == msk->ack_seq))
 		return true;

 	subflow->data_avail = 0;
 	return mptcp_subflow_data_available(ssk);
 }

+static void mptcp_check_data_fin_ack(struct sock *sk)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	if (__mptcp_check_fallback(msk))
+		return;
+
+	/* Look for an acknowledged DATA_FIN */
+	if (((1 << sk->sk_state) &
+	     (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
+	    msk->write_seq == atomic64_read(&msk->snd_una)) {
+		mptcp_stop_timer(sk);
+
+		WRITE_ONCE(msk->snd_data_fin_enable, 0);
+
+		switch (sk->sk_state) {
+		case TCP_FIN_WAIT1:
+			inet_sk_state_store(sk, TCP_FIN_WAIT2);
+			sk->sk_state_change(sk);
+			break;
+		case TCP_CLOSING:
+			fallthrough;
+		case TCP_LAST_ACK:
+			inet_sk_state_store(sk, TCP_CLOSE);
+			sk->sk_state_change(sk);
+			break;
+		}
+
+		if (sk->sk_shutdown == SHUTDOWN_MASK ||
+		    sk->sk_state == TCP_CLOSE)
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+		else
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	}
+}
+
+static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	if (READ_ONCE(msk->rcv_data_fin) &&
+	    ((1 << sk->sk_state) &
+	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
+		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
+
+		if (msk->ack_seq == rcv_data_fin_seq) {
+			if (seq)
+				*seq = rcv_data_fin_seq;
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
+{
+	long tout = ssk && inet_csk(ssk)->icsk_pending ?
+				      inet_csk(ssk)->icsk_timeout - jiffies : 0;
+
+	if (tout <= 0)
+		tout = mptcp_sk(sk)->timer_ival;
+	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
+}
+
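[annotation, not part of the patch] mptcp_set_timeout() makes the MPTCP-level retransmit timer inherit the tightest pending subflow deadline. A worked example with made-up values:

/* jiffies == 1000, ssk has a pending RTO with icsk_timeout == 1250:
 *   tout = 1250 - 1000 = 250 -> timer_ival = 250 jiffies
 * no ssk, or its timer already expired (tout <= 0):
 *   reuse the previous timer_ival, or TCP_RTO_MIN if that is still 0
 */
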
+static void mptcp_check_data_fin(struct sock *sk)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	u64 rcv_data_fin_seq;
+
+	if (__mptcp_check_fallback(msk) || !msk->first)
+		return;
+
+	/* Need to ack a DATA_FIN received from a peer while this side
+	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
+	 * msk->rcv_data_fin was set when parsing the incoming options
+	 * at the subflow level and the msk lock was not held, so this
+	 * is the first opportunity to act on the DATA_FIN and change
+	 * the msk state.
+	 *
+	 * If we are caught up to the sequence number of the incoming
+	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
+	 * not caught up, do nothing and let the recv code send DATA_ACK
+	 * when catching up.
+	 */
+
+	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
+		struct mptcp_subflow_context *subflow;
+
+		msk->ack_seq++;
+		WRITE_ONCE(msk->rcv_data_fin, 0);
+
+		sk->sk_shutdown |= RCV_SHUTDOWN;
+		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+		set_bit(MPTCP_DATA_READY, &msk->flags);
+
+		switch (sk->sk_state) {
+		case TCP_ESTABLISHED:
+			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
+			break;
+		case TCP_FIN_WAIT1:
+			inet_sk_state_store(sk, TCP_CLOSING);
+			break;
+		case TCP_FIN_WAIT2:
+			inet_sk_state_store(sk, TCP_CLOSE);
+			// @@ Close subflows now?
+			break;
+		default:
+			/* Other states not expected */
+			WARN_ON_ONCE(1);
+			break;
+		}
+
+		mptcp_set_timeout(sk, NULL);
+		mptcp_for_each_subflow(msk, subflow) {
+			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+			lock_sock(ssk);
+			tcp_send_ack(ssk);
+			release_sock(ssk);
+		}
+
+		sk->sk_state_change(sk);
+
+		if (sk->sk_shutdown == SHUTDOWN_MASK ||
+		    sk->sk_state == TCP_CLOSE)
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+		else
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	}
+}
+
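[annotation, not part of the patch] Like a TCP FIN, a DATA_FIN consumes one unit of data-sequence space. A worked pass through the "caught up" logic above:

/* peer announced DATA_FIN with rcv_data_fin_seq == 1000:
 * - msk->ack_seq == 990: not caught up, nothing happens here; the
 *   receive path will trigger the DATA_ACK once the last 10 bytes
 *   have been moved to the msk queue
 * - msk->ack_seq == 1000: caught up; ack_seq becomes 1001 (the
 *   DATA_FIN itself), every subflow sends an ACK carrying the
 *   DATA_ACK, and ESTABLISHED moves to CLOSE_WAIT
 */
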
 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					    struct sock *ssk,
					    unsigned int *bytes)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	struct sock *sk = (struct sock *)msk;
 	unsigned int moved = 0;
 	bool more_data_avail;
 	struct tcp_sock *tp;
 	bool done = false;

 	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
 		*bytes = 0;
 		return false;
 	}

-	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
-
-		if (rcvbuf > sk->sk_rcvbuf)
-			sk->sk_rcvbuf = rcvbuf;
-	}
-
 	tp = tcp_sk(ssk);
 	do {
 		u32 map_remaining, offset;
 		u32 seq = tp->copied_seq;
 		struct sk_buff *skb;
 		bool fin;

 		/* try to move as much data as available */
 		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

 		skb = skb_peek(&ssk->sk_receive_queue);
 		if (!skb)
 			break;

+		if (__mptcp_check_fallback(msk)) {
+			/* if we are running under the workqueue, TCP could have
+			 * collapsed skbs between dummy map creation and now;
+			 * be sure to adjust the size
+			 */
+			map_remaining = skb->len;
+			subflow->map_data_len = skb->len;
+		}
+
 		offset = seq - TCP_SKB_CB(skb)->seq;
 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
 		if (fin) {
 			done = true;
 			seq++;
 		}

 		if (offset < skb->len) {

--- 20 unchanged lines hidden ---

 		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
 			done = true;
 			break;
 		}
 	} while (more_data_avail);

 	*bytes = moved;

+	/* If the moves have caught up with the DATA_FIN sequence number
+	 * it's time to ack the DATA_FIN and change socket state, but
+	 * this is not a good place to change state. Let the workqueue
+	 * do it.
+	 */
+	if (mptcp_pending_data_fin(sk, NULL) &&
+	    schedule_work(&msk->work))
+		sock_hold(sk);
+
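[annotation, not part of the patch] schedule_work() returns true only when the work item was not already queued, so at most one reference is taken on behalf of the worker. A sketch of the pattern, with the balancing release hedged as an assumption about mptcp_worker():

	if (schedule_work(&msk->work))
		sock_hold(sk);	/* the worker now owns one reference ... */

	/* ... which the work function is assumed to drop when done: */
	sock_put(sk);
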
 	return done;
 }

 /* In most cases we will be able to lock the mptcp socket.  If it's already
  * owned, we need to defer to the work queue to avoid ABBA deadlock.
  */
 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 {

--- 48 unchanged lines hidden ---

 	if (likely(list_empty(&msk->join_list)))
 		return;

 	spin_lock_bh(&msk->join_list_lock);
 	list_splice_tail_init(&msk->join_list, &msk->conn_list);
 	spin_unlock_bh(&msk->join_list_lock);
 }

-static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
-{
-	long tout = ssk && inet_csk(ssk)->icsk_pending ?
-				      inet_csk(ssk)->icsk_timeout - jiffies : 0;
-
-	if (tout <= 0)
-		tout = mptcp_sk(sk)->timer_ival;
-	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
-}
-
 static bool mptcp_timer_pending(struct sock *sk)
 {
 	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
 }

 static void mptcp_reset_timer(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);

--- 5 unchanged lines hidden ---

 		tout = TCP_RTO_MIN;
 	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
 }

 void mptcp_data_acked(struct sock *sk)
 {
 	mptcp_reset_timer(sk);

-	if (!sk_stream_is_writeable(sk) &&
+	if ((!sk_stream_is_writeable(sk) ||
+	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
 	    schedule_work(&mptcp_sk(sk)->work))
 		sock_hold(sk);
 }

 void mptcp_subflow_eof(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);

--- 18 unchanged lines hidden ---

 		sk->sk_shutdown |= RCV_SHUTDOWN;

 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
 		set_bit(MPTCP_DATA_READY, &msk->flags);
 		sk->sk_data_ready(sk);
 	}
 }

-static void mptcp_stop_timer(struct sock *sk)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
-	mptcp_sk(sk)->timer_ival = 0;
-}
-
 static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
 {
 	const struct sock *sk = (const struct sock *)msk;

 	if (!msk->cached_ext)
 		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

 	return !!msk->cached_ext;

--- 47 unchanged lines hidden ---

 	dfrag_uncharge(sk, len);
 	put_page(dfrag->page);
 }

 static void mptcp_clean_una(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
-	u64 snd_una = atomic64_read(&msk->snd_una);
 	bool cleaned = false;
+	u64 snd_una;
+
+	/* on fallback we just need to ignore snd_una, as this is really
+	 * plain TCP
+	 */
+	if (__mptcp_check_fallback(msk))
+		atomic64_set(&msk->snd_una, msk->write_seq);
+	snd_una = atomic64_read(&msk->snd_una);

 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
 			break;

 		dfrag_clear(sk, dfrag);
 		cleaned = true;
 	}

 	dfrag = mptcp_rtx_head(sk);
 	if (dfrag && after64(snd_una, dfrag->data_seq)) {
-		u64 delta = dfrag->data_seq + dfrag->data_len - snd_una;
+		u64 delta = snd_una - dfrag->data_seq;

+		if (WARN_ON_ONCE(delta > dfrag->data_len))
+			goto out;
+
 		dfrag->data_seq += delta;
+		dfrag->offset += delta;
 		dfrag->data_len -= delta;

 		dfrag_uncharge(sk, delta);
 		cleaned = true;
 	}

+out:
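[annotation, not part of the patch] The new delta is the number of bytes of the head fragment that snd_una has covered; the old expression computed the unacked remainder instead. Worked example:

/* head dfrag: data_seq = 100, offset = 0, data_len = 50; snd_una = 120
 *   delta = 120 - 100 = 20 bytes acked inside the fragment
 *   -> data_seq = 120, offset = 20, data_len = 30, uncharge 20 bytes
 * (the old formula gave 100 + 50 - 120 = 30, trimming unacked data)
 */
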
 	if (cleaned) {
 		sk_mem_reclaim_partial(sk);

 		/* Only wake up writers if a subflow is ready */
 		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
 			sk_stream_write_space(sk);
 	}
 }

--- 169 unchanged lines hidden ---

 	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 		 mpext->dsn64);

 out:
 	if (!retransmission)
 		pfrag->offset += frag_truesize;
-	*write_seq += ret;
+	WRITE_ONCE(*write_seq, *write_seq + ret);
 	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

 	return ret;
 }

 static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
 {
 	clear_bit(MPTCP_SEND_SPACE, &msk->flags);

--- 50 unchanged lines hidden ---

 	mptcp_nospace(msk, sock);
 }

 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
 	int mss_now = 0, size_goal = 0, ret = 0;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct page_frag *pfrag;
-	struct socket *ssock;
 	size_t copied = 0;
 	struct sock *ssk;
 	bool tx_ok;
 	long timeo;

 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
 		return -EOPNOTSUPP;

 	lock_sock(sk);

 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 		ret = sk_stream_wait_connect(sk, &timeo);
 		if (ret)
 			goto out;
 	}

-fallback:
-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock)) {
-		release_sock(sk);
-		pr_debug("fallback passthrough");
-		ret = sock_sendmsg(ssock, msg);
-		return ret >= 0 ? ret + copied : (copied ? copied : ret);
-	}
-
 	pfrag = sk_page_frag(sk);
 restart:
 	mptcp_clean_una(sk);

+	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
+		ret = -EPIPE;
+		goto out;
+	}
+
 wait_for_sndbuf:
 	__mptcp_flush_join_list(msk);
 	ssk = mptcp_subflow_get_send(msk);
 	while (!sk_stream_memory_free(sk) ||
 	       !ssk ||
 	       !mptcp_page_frag_refill(ssk, pfrag)) {
 		if (ssk) {
 			/* make sure retransmit timer is

--- 31 unchanged lines hidden ---

 		if (ret < 0) {
 			if (ret == -EAGAIN && timeo > 0) {
 				mptcp_set_timeout(sk, ssk);
 				release_sock(ssk);
 				goto restart;
 			}
 			break;
 		}
-		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
-			/* Can happen for passive sockets:
-			 * 3WHS negotiated MPTCP, but first packet after is
-			 * plain TCP (e.g. due to middlebox filtering unknown
-			 * options).
-			 *
-			 * Fall back to TCP.
-			 */
-			release_sock(ssk);
-			goto fallback;
-		}
-
 		copied += ret;

 		tx_ok = msg_data_left(msg);
 		if (!tx_ok)
 			break;

 		if (!sk_stream_memory_free(ssk) ||

--- 103 unchanged lines hidden ---

 		if (copied >= len)
 			break;
 	}

 	return copied;
 }

+/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
+ *
+ * Only difference: Use highest rtt estimate of the subflows in use.
+ */
+static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
+{
+	struct mptcp_subflow_context *subflow;
+	struct sock *sk = (struct sock *)msk;
+	u32 time, advmss = 1;
+	u64 rtt_us, mstamp;
+
+	sock_owned_by_me(sk);
+
+	if (copied <= 0)
+		return;
+
+	msk->rcvq_space.copied += copied;
+
+	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
+
+	rtt_us = msk->rcvq_space.rtt_us;
+	if (rtt_us && time < (rtt_us >> 3))
+		return;
+
+	rtt_us = 0;
+	mptcp_for_each_subflow(msk, subflow) {
+		const struct tcp_sock *tp;
+		u64 sf_rtt_us;
+		u32 sf_advmss;
+
+		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
+
+		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
+		sf_advmss = READ_ONCE(tp->advmss);
+
+		rtt_us = max(sf_rtt_us, rtt_us);
+		advmss = max(sf_advmss, advmss);
+	}
+
+	msk->rcvq_space.rtt_us = rtt_us;
+	if (time < (rtt_us >> 3) || rtt_us == 0)
+		return;
+
+	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
+		goto new_measure;
+
+	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+		int rcvmem, rcvbuf;
+		u64 rcvwin, grow;
+
+		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
+
+		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
+
+		do_div(grow, msk->rcvq_space.space);
+		rcvwin += (grow << 1);
+
+		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
+		while (tcp_win_from_space(sk, rcvmem) < advmss)
+			rcvmem += 128;
+
+		do_div(rcvwin, advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+
+		if (rcvbuf > sk->sk_rcvbuf) {
+			u32 window_clamp;
+
+			window_clamp = tcp_win_from_space(sk, rcvbuf);
+			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+
+			/* Make subflows follow along.  If we do not do this, we
+			 * get drops at subflow level if skbs can't be moved to
+			 * the mptcp rx queue fast enough (announced rcv_win can
+			 * exceed ssk->sk_rcvbuf).
+			 */
+			mptcp_for_each_subflow(msk, subflow) {
+				struct sock *ssk;
+
+				ssk = mptcp_subflow_tcp_sock(subflow);
+				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
+				tcp_sk(ssk)->window_clamp = window_clamp;
+			}
+		}
+	}
+
+	msk->rcvq_space.space = msk->rcvq_space.copied;
+new_measure:
+	msk->rcvq_space.copied = 0;
+	msk->rcvq_space.time = mstamp;
+}
+
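[annotation, not part of the patch] A worked pass through the sizing math above, with round numbers:

/* previous measurement: space = 65536 bytes; this RTT: copied = 131072,
 * advmss = 1460:
 *   rcvwin = 2 * 131072 + 16 * 1460            = 285504
 *   grow   = 285504 * (131072 - 65536) / 65536 = 285504
 *   rcvwin += 2 * grow                        -> 856512
 *   rcvmem = SKB_TRUESIZE(1460 + MAX_TCP_HEADER), bumped in 128-byte
 *            steps until tcp_win_from_space(sk, rcvmem) >= advmss
 *   rcvbuf = min(rcvwin / 1460 * rcvmem, sysctl_tcp_rmem[2])
 * the result is also pushed down to every subflow so the announced
 * MPTCP-level window can really be buffered at the TCP level.
 */
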
 static bool __mptcp_move_skbs(struct mptcp_sock *msk)
 {
 	unsigned int moved = 0;
 	bool done;

 	do {
 		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

--- 7 unchanged lines hidden ---

 	return moved > 0;
 }

 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			  int nonblock, int flags, int *addr_len)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	struct socket *ssock;
 	int copied = 0;
 	int target;
 	long timeo;

 	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
 		return -EOPNOTSUPP;

 	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock)) {
-fallback:
-		release_sock(sk);
-		pr_debug("fallback-read subflow=%p",
-			 mptcp_subflow_ctx(ssock->sk));
-		copied = sock_recvmsg(ssock, msg, flags);
-		return copied;
-	}
-
 	timeo = sock_rcvtimeo(sk, nonblock);

 	len = min_t(size_t, len, INT_MAX);
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 	__mptcp_flush_join_list(msk);

 	while (len > (size_t)copied) {
 		int bytes_read;

--- 49 unchanged lines hidden ---

 			if (signal_pending(current)) {
 				copied = sock_intr_errno(timeo);
 				break;
 			}
 		}

 		pr_debug("block timeout %ld", timeo);
 		mptcp_wait_data(sk, &timeo);
-		ssock = __mptcp_tcp_fallback(msk);
-		if (unlikely(ssock))
-			goto fallback;
 	}

 	if (skb_queue_empty(&sk->sk_receive_queue)) {
 		/* entire backlog drained, clear DATA_READY. */
 		clear_bit(MPTCP_DATA_READY, &msk->flags);

 		/* .. race-breaker: ssk might have gotten new data
 		 * after last __mptcp_move_skbs() returned false.
 		 */
 		if (unlikely(__mptcp_move_skbs(msk)))
 			set_bit(MPTCP_DATA_READY, &msk->flags);
 	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
 		/* data to read but mptcp_wait_data() cleared DATA_READY */
 		set_bit(MPTCP_DATA_READY, &msk->flags);
 	}
 out_err:
+	mptcp_rcv_space_adjust(msk, copied);
+
 	release_sock(sk);
 	return copied;
 }

 static void mptcp_retransmit_handler(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);

-	if (atomic64_read(&msk->snd_una) == msk->write_seq) {
+	if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
 		mptcp_stop_timer(sk);
 	} else {
 		set_bit(MPTCP_WORK_RTX, &msk->flags);
 		if (schedule_work(&msk->work))
 			sock_hold(sk);
 	}
 }

--- 72 unchanged lines hidden ---

 	}
 }

 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
 {
 	return 0;
 }

+static void pm_work(struct mptcp_sock *msk)
+{
+	struct mptcp_pm_data *pm = &msk->pm;
+
+	spin_lock_bh(&msk->pm.lock);
+
+	pr_debug("msk=%p status=%x", msk, pm->status);
+	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+		mptcp_pm_nl_add_addr_received(msk);
+	}
+	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
+		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
+		mptcp_pm_nl_fully_established(msk);
+	}
+	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
+		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
+		mptcp_pm_nl_subflow_established(msk);
+	}
+
+	spin_unlock_bh(&msk->pm.lock);
+}
+
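[annotation, not part of the patch] pm->status is an event mask: producers set a bit under pm.lock and kick the worker, pm_work() drains it. A sketch of the producer side (hypothetical, for illustration only; the real setters live in the path-manager code, not shown in this diff):

	spin_lock_bh(&msk->pm.lock);
	msk->pm.status |= BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
	spin_unlock_bh(&msk->pm.lock);
	if (schedule_work(&msk->work))	/* mptcp_worker() calls pm_work() */
		sock_hold((struct sock *)msk);
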
 static void mptcp_worker(struct work_struct *work)
 {
 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
 	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
 	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
 	struct mptcp_data_frag *dfrag;
 	u64 orig_write_seq;
 	size_t copied = 0;
 	struct msghdr msg;
 	long timeo = 0;

 	lock_sock(sk);
 	mptcp_clean_una(sk);
+	mptcp_check_data_fin_ack(sk);
 	__mptcp_flush_join_list(msk);
 	__mptcp_move_skbs(msk);

+	if (msk->pm.status)
+		pm_work(msk);
+
 	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
 		mptcp_check_for_eof(msk);

+	mptcp_check_data_fin(sk);
+
 	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
 		goto unlock;

 	dfrag = mptcp_rtx_head(sk);
 	if (!dfrag)
 		goto unlock;

 	if (!mptcp_ext_cache_refill(msk))

--- 76 unchanged lines hidden ---

 	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
 		return -ENOMEM;

 	ret = __mptcp_init_sock(sk);
 	if (ret)
 		return ret;

+	ret = __mptcp_socket_create(mptcp_sk(sk));
+	if (ret)
+		return ret;
+
 	sk_sockets_allocated_inc(sk);
+	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

 	return 0;
 }

 static void __mptcp_clear_xmit(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);

--- 8 unchanged lines hidden ---

 static void mptcp_cancel_work(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);

 	if (cancel_work_sync(&msk->work))
 		sock_put(sk);
 }

-static void mptcp_subflow_shutdown(struct sock *ssk, int how,
-				   bool data_fin_tx_enable, u64 data_fin_tx_seq)
+static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
 {
 	lock_sock(ssk);

 	switch (ssk->sk_state) {
 	case TCP_LISTEN:
 		if (!(how & RCV_SHUTDOWN))
 			break;
 		/* fall through */
 	case TCP_SYN_SENT:
 		tcp_disconnect(ssk, O_NONBLOCK);
 		break;
 	default:
-		if (data_fin_tx_enable) {
-			struct mptcp_subflow_context *subflow;
-
-			subflow = mptcp_subflow_ctx(ssk);
-			subflow->data_fin_tx_seq = data_fin_tx_seq;
-			subflow->data_fin_tx_enable = 1;
+		if (__mptcp_check_fallback(mptcp_sk(sk))) {
+			pr_debug("Fallback");
+			ssk->sk_shutdown |= how;
+			tcp_shutdown(ssk, how);
+		} else {
+			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+			mptcp_set_timeout(sk, ssk);
+			tcp_send_ack(ssk);
 		}
-
-		ssk->sk_shutdown |= how;
-		tcp_shutdown(ssk, how);
 		break;
 	}

-	/* Wake up anyone sleeping in poll. */
-	ssk->sk_state_change(ssk);
 	release_sock(ssk);
 }

-/* Called with msk lock held, releases such lock before returning */
+static const unsigned char new_state[16] = {
+	/* current state:     new state:      action:	*/
+	[0 /* (Invalid) */]	= TCP_CLOSE,
+	[TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+	[TCP_SYN_SENT]		= TCP_CLOSE,
+	[TCP_SYN_RECV]		= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+	[TCP_FIN_WAIT1]		= TCP_FIN_WAIT1,
+	[TCP_FIN_WAIT2]		= TCP_FIN_WAIT2,
+	[TCP_TIME_WAIT]		= TCP_CLOSE,	/* should not happen ! */
+	[TCP_CLOSE]		= TCP_CLOSE,
+	[TCP_CLOSE_WAIT]	= TCP_LAST_ACK | TCP_ACTION_FIN,
+	[TCP_LAST_ACK]		= TCP_LAST_ACK,
+	[TCP_LISTEN]		= TCP_CLOSE,
+	[TCP_CLOSING]		= TCP_CLOSING,
+	[TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
+};
+
+static int mptcp_close_state(struct sock *sk)
+{
+	int next = (int)new_state[sk->sk_state];
+	int ns = next & TCP_STATE_MASK;
+
+	inet_sk_state_store(sk, ns);
+
+	return next & TCP_ACTION_FIN;
+}
+
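[annotation, not part of the patch] The table mirrors tcp_close_state(): the low bits carry the next state, TCP_ACTION_FIN says whether a (DATA_)FIN must be sent. Example:

/* close() in TCP_ESTABLISHED:
 *   new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *   -> state becomes TCP_FIN_WAIT1 and the non-zero return makes
 *      mptcp_close() bump write_seq for the DATA_FIN and shut down
 *      every subflow
 * close() in TCP_SYN_SENT:
 *   new_state[TCP_SYN_SENT] == TCP_CLOSE -> no DATA_FIN needed
 */
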
 static void mptcp_close(struct sock *sk, long timeout)
 {
 	struct mptcp_subflow_context *subflow, *tmp;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	LIST_HEAD(conn_list);
-	u64 data_fin_tx_seq;

 	lock_sock(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;

+	if (sk->sk_state == TCP_LISTEN) {
+		inet_sk_state_store(sk, TCP_CLOSE);
+		goto cleanup;
+	} else if (sk->sk_state == TCP_CLOSE) {
+		goto cleanup;
+	}
+
+	if (__mptcp_check_fallback(msk)) {
+		goto update_state;
+	} else if (mptcp_close_state(sk)) {
+		pr_debug("Sending DATA_FIN sk=%p", sk);
+		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
+		WRITE_ONCE(msk->snd_data_fin_enable, 1);
+
+		mptcp_for_each_subflow(msk, subflow) {
+			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+
+			mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
+		}
+	}
+
+	sk_stream_wait_close(sk, timeout);
+
+update_state:
 	inet_sk_state_store(sk, TCP_CLOSE);

+cleanup:
 	/* be sure to always acquire the join list lock, to sync vs
 	 * mptcp_finish_join().
 	 */
 	spin_lock_bh(&msk->join_list_lock);
 	list_splice_tail_init(&msk->join_list, &msk->conn_list);
 	spin_unlock_bh(&msk->join_list_lock);
 	list_splice_init(&msk->conn_list, &conn_list);

-	data_fin_tx_seq = msk->write_seq;
-
 	__mptcp_clear_xmit(sk);

 	release_sock(sk);

 	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

-		subflow->data_fin_tx_seq = data_fin_tx_seq;
-		subflow->data_fin_tx_enable = 1;
 		__mptcp_close_ssk(sk, ssk, subflow, timeout);
 	}

 	mptcp_cancel_work(sk);
-	mptcp_pm_close(msk);

 	__skb_queue_purge(&sk->sk_receive_queue);

 	sk_common_release(sk);
 }

 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {

--- 55 unchanged lines hidden ---

 #endif

 	__mptcp_init_sock(nsk);

 	msk = mptcp_sk(nsk);
 	msk->local_key = subflow_req->local_key;
 	msk->token = subflow_req->token;
 	msk->subflow = NULL;
+	WRITE_ONCE(msk->fully_established, false);

-	if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
-		nsk->sk_state = TCP_CLOSE;
-		bh_unlock_sock(nsk);
-
-		/* we can't call into mptcp_close() here - possible BH context
-		 * free the sock directly.
-		 * sk_clone_lock() sets nsk refcnt to two, hence call sk_free()
-		 * too.
-		 */
-		sk_common_release(nsk);
-		sk_free(nsk);
-		return NULL;
-	}
-
 	msk->write_seq = subflow_req->idsn + 1;
 	atomic64_set(&msk->snd_una, msk->write_seq);
 	if (mp_opt->mp_capable) {
 		msk->can_ack = true;
 		msk->remote_key = mp_opt->sndr_key;
 		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
 		ack_seq++;
 		msk->ack_seq = ack_seq;

--- 4 unchanged lines hidden ---

 	inet_sk_state_store(nsk, TCP_SYN_RECV);
 	bh_unlock_sock(nsk);

 	/* keep a single reference */
 	__sock_put(nsk);
 	return nsk;
 }

+void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
+{
+	const struct tcp_sock *tp = tcp_sk(ssk);
+
+	msk->rcvq_space.copied = 0;
+	msk->rcvq_space.rtt_us = 0;
+
+	msk->rcvq_space.time = tp->tcp_mstamp;
+
+	/* initial rcv_space offering made to peer */
+	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
+				      TCP_INIT_CWND * tp->advmss);
+	if (msk->rcvq_space.space == 0)
+		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+}
+
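[annotation, not part of the patch] TCP_INIT_CWND is 10, so the first measurement window is bounded by ten full segments:

/* e.g. rcv_wnd = 65535, advmss = 1460:
 *   space = min(65535, 10 * 1460) = 14600 bytes for the first RTT;
 * if the computed space is 0 (no window or mss seen yet), fall back
 * to 10 * TCP_MSS_DEFAULT = 10 * 536 bytes.
 */
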
 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				  bool kern)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct socket *listener;
 	struct sock *newsk;

 	listener = __mptcp_nmpc_socket(msk);
 	if (WARN_ON_ONCE(!listener)) {
 		*err = -EINVAL;
 		return NULL;
 	}

 	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
 	newsk = inet_csk_accept(listener->sk, flags, err, kern);
 	if (!newsk)
 		return NULL;

 	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

 	if (sk_is_mptcp(newsk)) {
 		struct mptcp_subflow_context *subflow;
 		struct sock *new_mptcp_sock;
 		struct sock *ssk = newsk;

 		subflow = mptcp_subflow_ctx(newsk);
 		new_mptcp_sock = subflow->conn;

--- 11 unchanged lines hidden ---

 		local_bh_disable();
 		bh_lock_sock(new_mptcp_sock);
 		msk = mptcp_sk(new_mptcp_sock);
 		msk->first = newsk;

 		newsk = new_mptcp_sock;
 		mptcp_copy_inaddrs(newsk, ssk);
 		list_add(&subflow->node, &msk->conn_list);
-		inet_sk_state_store(newsk, TCP_ESTABLISHED);

+		mptcp_rcv_space_init(msk, ssk);
 		bh_unlock_sock(new_mptcp_sock);

 		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
 		local_bh_enable();
 	} else {
 		MPTCP_INC_STATS(sock_net(sk),
 				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
 	}

 	return newsk;
 }

 static void mptcp_destroy(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);

-	mptcp_token_destroy(msk->token);
+	mptcp_token_destroy(msk);
 	if (msk->cached_ext)
 		__skb_ext_put(msk->cached_ext);

 	sk_sockets_allocated_dec(sk);
 }

+static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
+				       sockptr_t optval, unsigned int optlen)
+{
+	struct sock *sk = (struct sock *)msk;
+	struct socket *ssock;
+	int ret;
+
+	switch (optname) {
+	case SO_REUSEPORT:
+	case SO_REUSEADDR:
+		lock_sock(sk);
+		ssock = __mptcp_nmpc_socket(msk);
+		if (!ssock) {
+			release_sock(sk);
+			return -EINVAL;
+		}
+
+		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
+		if (ret == 0) {
+			if (optname == SO_REUSEPORT)
+				sk->sk_reuseport = ssock->sk->sk_reuseport;
+			else if (optname == SO_REUSEADDR)
+				sk->sk_reuse = ssock->sk->sk_reuse;
+		}
+		release_sock(sk);
+		return ret;
+	}
+
+	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
+}
+
+static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
+			       sockptr_t optval, unsigned int optlen)
+{
+	struct sock *sk = (struct sock *)msk;
+	int ret = -EOPNOTSUPP;
+	struct socket *ssock;
+
+	switch (optname) {
+	case IPV6_V6ONLY:
+		lock_sock(sk);
+		ssock = __mptcp_nmpc_socket(msk);
+		if (!ssock) {
+			release_sock(sk);
+			return -EINVAL;
+		}
+
+		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
+		if (ret == 0)
+			sk->sk_ipv6only = ssock->sk->sk_ipv6only;
+
+		release_sock(sk);
+		break;
+	}
+
+	return ret;
+}
+
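[annotation, not part of the patch] These helpers make pre-connect socket options effective on MPTCP sockets by forwarding them to the initial subflow and mirroring the result on the msk. Userspace view (sketch; addr setup omitted):

	int s = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	int one = 1;

	/* forwarded to the first subflow, then mirrored on the msk */
	setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	bind(s, (struct sockaddr *)&addr, sizeof(addr));
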
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
-			    char __user *optval, unsigned int optlen)
+			    sockptr_t optval, unsigned int optlen)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	struct socket *ssock;
+	struct sock *ssk;

 	pr_debug("msk=%p", msk);

+	if (level == SOL_SOCKET)
+		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+
 	/* @@ the meaning of setsockopt() when the socket is connected and
 	 * there are multiple subflows is not yet defined. It is up to the
 	 * MPTCP-level socket to configure the subflows until the subflow
 	 * is in TCP fallback, when TCP socket options are passed through
 	 * to the one remaining subflow.
 	 */
 	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
+	ssk = __mptcp_tcp_fallback(msk);
 	release_sock(sk);
-	if (ssock)
-		return tcp_setsockopt(ssock->sk, level, optname, optval,
-				      optlen);
+	if (ssk)
+		return tcp_setsockopt(ssk, level, optname, optval, optlen);

+	if (level == SOL_IPV6)
+		return mptcp_setsockopt_v6(msk, optname, optval, optlen);
+
 	return -EOPNOTSUPP;
 }

 static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			     char __user *optval, int __user *option)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	struct socket *ssock;
+	struct sock *ssk;

 	pr_debug("msk=%p", msk);

 	/* @@ the meaning of setsockopt() when the socket is connected and
 	 * there are multiple subflows is not yet defined. It is up to the
 	 * MPTCP-level socket to configure the subflows until the subflow
 	 * is in TCP fallback, when socket options are passed through
 	 * to the one remaining subflow.
 	 */
 	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
+	ssk = __mptcp_tcp_fallback(msk);
 	release_sock(sk);
-	if (ssock)
-		return tcp_getsockopt(ssock->sk, level, optname, optval,
-				      option);
+	if (ssk)
+		return tcp_getsockopt(ssk, level, optname, optval, option);

 	return -EOPNOTSUPP;
 }

 #define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			     TCPF_WRITE_TIMER_DEFERRED)

 /* this is very similar to tcp_release_cb(), but we must handle differently a

--- 22 unchanged lines hidden ---

 	}

 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
 		mptcp_retransmit_handler(sk);
 		__sock_put(sk);
 	}
 }

+static int mptcp_hash(struct sock *sk)
+{
+	/* should never be called,
+	 * we hash the TCP subflows not the master socket
+	 */
+	WARN_ON_ONCE(1);
+	return 0;
+}
+
+static void mptcp_unhash(struct sock *sk)
+{
+	/* called from sk_common_release(), but nothing to do here */
+}
+
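[annotation, not part of the patch] Only subflows are hashed in the TCP ehash; incoming segments always demux to a subflow socket and reach the msk through its context, so hashing the master socket would be meaningless:

/* conceptual demux path:
 *   skb -> TCP ehash lookup -> ssk (subflow)
 *       -> mptcp_subflow_ctx(ssk)->conn -> msk
 * hence mptcp_hash() must never run and unhash is a no-op.
 */
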
 static int mptcp_get_port(struct sock *sk, unsigned short snum)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct socket *ssock;

 	ssock = __mptcp_nmpc_socket(msk);
 	pr_debug("msk=%p, subflow=%p", msk, ssock);
 	if (WARN_ON_ONCE(!ssock))

--- 8 unchanged lines hidden ---

 	struct mptcp_sock *msk;
 	struct sock *sk;
 	u64 ack_seq;

 	subflow = mptcp_subflow_ctx(ssk);
 	sk = subflow->conn;
 	msk = mptcp_sk(sk);

-	if (!subflow->mp_capable) {
-		MPTCP_INC_STATS(sock_net(sk),
-				MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
-		return;
-	}
-
 	pr_debug("msk=%p, token=%u", sk, subflow->token);

 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
 	ack_seq++;
 	subflow->map_seq = ack_seq;
 	subflow->map_subflow_seq = 1;
-	subflow->rel_write_seq = 1;

 	/* the socket is not connected yet, no msk/subflow ops can access/race
 	 * accessing the field below
 	 */
 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
 	WRITE_ONCE(msk->local_key, subflow->local_key);
-	WRITE_ONCE(msk->token, subflow->token);
 	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
 	WRITE_ONCE(msk->ack_seq, ack_seq);
 	WRITE_ONCE(msk->can_ack, 1);
 	atomic64_set(&msk->snd_una, msk->write_seq);

 	mptcp_pm_new_connection(msk, 0);
+
+	mptcp_rcv_space_init(msk, ssk);
 }

 static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
 {
 	write_lock_bh(&sk->sk_callback_lock);
 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
 	sk_set_socket(sk, parent);
 	sk->sk_uid = SOCK_INODE(parent)->i_uid;

--- 6 unchanged lines hidden ---

 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
 	struct sock *parent = (void *)msk;
 	struct socket *parent_sock;
 	bool ret;

 	pr_debug("msk=%p, subflow=%p", msk, subflow);

 	/* mptcp socket already closing? */
-	if (inet_sk_state_load(parent) != TCP_ESTABLISHED)
+	if (!mptcp_is_fully_established(parent))
1712 return false;
1713
1714 if (!msk->pm.server_side)
1715 return true;
1716
1717 if (!mptcp_pm_allow_new_subflow(msk))
1718 return false;
1719

--- 36 unchanged lines hidden ---

1756 .accept = mptcp_accept,
1757 .setsockopt = mptcp_setsockopt,
1758 .getsockopt = mptcp_getsockopt,
1759 .shutdown = tcp_shutdown,
1760 .destroy = mptcp_destroy,
1761 .sendmsg = mptcp_sendmsg,
1762 .recvmsg = mptcp_recvmsg,
1763 .release_cb = mptcp_release_cb,
2045 return false;
2046
2047 if (!msk->pm.server_side)
2048 return true;
2049
2050 if (!mptcp_pm_allow_new_subflow(msk))
2051 return false;
2052

--- 36 unchanged lines hidden ---

2089 .accept = mptcp_accept,
2090 .setsockopt = mptcp_setsockopt,
2091 .getsockopt = mptcp_getsockopt,
2092 .shutdown = tcp_shutdown,
2093 .destroy = mptcp_destroy,
2094 .sendmsg = mptcp_sendmsg,
2095 .recvmsg = mptcp_recvmsg,
2096 .release_cb = mptcp_release_cb,
1764 .hash = inet_hash,
1765 .unhash = inet_unhash,
2097 .hash = mptcp_hash,
2098 .unhash = mptcp_unhash,
1766 .get_port = mptcp_get_port,
1767 .sockets_allocated = &mptcp_sockets_allocated,
1768 .memory_allocated = &tcp_memory_allocated,
1769 .memory_pressure = &tcp_memory_pressure,
1770 .stream_memory_free = mptcp_memory_free,
1771 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1772 .sysctl_mem = sysctl_tcp_mem,
1773 .obj_size = sizeof(struct mptcp_sock),
2099 .get_port = mptcp_get_port,
2100 .sockets_allocated = &mptcp_sockets_allocated,
2101 .memory_allocated = &tcp_memory_allocated,
2102 .memory_pressure = &tcp_memory_pressure,
2103 .stream_memory_free = mptcp_memory_free,
2104 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2105 .sysctl_mem = sysctl_tcp_mem,
2106 .obj_size = sizeof(struct mptcp_sock),
2107 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1774 .no_autobind = true,
1775};
1776
1777static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1778{
1779 struct mptcp_sock *msk = mptcp_sk(sock->sk);
1780 struct socket *ssock;
1781 int err;
1782
1783 lock_sock(sock->sk);
2108 .no_autobind = true,
2109};
2110
2111static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2112{
2113 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2114 struct socket *ssock;
2115 int err;
2116
2117 lock_sock(sock->sk);
1784 ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
1785 if (IS_ERR(ssock)) {
1786 err = PTR_ERR(ssock);
2118 ssock = __mptcp_nmpc_socket(msk);
2119 if (!ssock) {
2120 err = -EINVAL;
1787 goto unlock;
1788 }
1789
1790 err = ssock->ops->bind(ssock, uaddr, addr_len);
1791 if (!err)
1792 mptcp_copy_inaddrs(sock->sk, ssock->sk);
1793
1794unlock:
1795 release_sock(sock->sk);
1796 return err;
1797}
1798
2121 goto unlock;
2122 }
2123
2124 err = ssock->ops->bind(ssock, uaddr, addr_len);
2125 if (!err)
2126 mptcp_copy_inaddrs(sock->sk, ssock->sk);
2127
2128unlock:
2129 release_sock(sock->sk);
2130 return err;
2131}
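
/* bind() is thus delegated entirely to the first subflow; on success
 * mptcp_copy_inaddrs() mirrors the subflow's bound address back onto
 * the MPTCP-level socket, so getsockname() on the parent reports it.
 */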
2132
2133static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
2134 struct mptcp_subflow_context *subflow)
2135{
2136 subflow->request_mptcp = 0;
2137 __mptcp_do_fallback(msk);
2138}
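
/* With request_mptcp cleared, the upcoming SYN carries no MP_CAPABLE
 * option and __mptcp_do_fallback() marks the msk so the data path
 * treats the connection as a single plain-TCP flow from the outset.
 */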
2139
1799static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1800 int addr_len, int flags)
1801{
1802 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2140static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
2141 int addr_len, int flags)
2142{
2143 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2144 struct mptcp_subflow_context *subflow;
1803 struct socket *ssock;
1804 int err;
1805
1806 lock_sock(sock->sk);
1807 if (sock->state != SS_UNCONNECTED && msk->subflow) {
1808 /* pending connection or invalid state, let existing subflow
1809 * cope with that
1810 */
1811 ssock = msk->subflow;
1812 goto do_connect;
1813 }
1814
2145 struct socket *ssock;
2146 int err;
2147
2148 lock_sock(sock->sk);
2149 if (sock->state != SS_UNCONNECTED && msk->subflow) {
2150 /* pending connection or invalid state, let existing subflow
2151 * cope with that
2152 */
2153 ssock = msk->subflow;
2154 goto do_connect;
2155 }
2156
1815 ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
1816 if (IS_ERR(ssock)) {
1817 err = PTR_ERR(ssock);
2157 ssock = __mptcp_nmpc_socket(msk);
2158 if (!ssock) {
2159 err = -EINVAL;
1818 goto unlock;
1819 }
1820
2160 goto unlock;
2161 }
2162
2163 mptcp_token_destroy(msk);
2164 inet_sk_state_store(sock->sk, TCP_SYN_SENT);
2165 subflow = mptcp_subflow_ctx(ssock->sk);
1821#ifdef CONFIG_TCP_MD5SIG
1822	/* no MPTCP if MD5SIG is enabled on this socket, as we could otherwise
1823	 * run out of TCP option space.
1824	 */
1825 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
2166#ifdef CONFIG_TCP_MD5SIG
2167	/* no MPTCP if MD5SIG is enabled on this socket, as we could otherwise
2168	 * run out of TCP option space.
2169	 */
2170 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
1826 mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
2171 mptcp_subflow_early_fallback(msk, subflow);
1827#endif
2172#endif
2173 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
2174 mptcp_subflow_early_fallback(msk, subflow);
1828
1829do_connect:
1830 err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
1831 sock->state = ssock->state;
1832
1833 /* on successful connect, the msk state will be moved to established by
1834 * subflow_finish_connect()
1835 */
1836 if (!err || err == -EINPROGRESS)
1837 mptcp_copy_inaddrs(sock->sk, ssock->sk);
1838 else
1839 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
1840
1841unlock:
1842 release_sock(sock->sk);
1843 return err;
1844}
1845
2175
2176do_connect:
2177 err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
2178 sock->state = ssock->state;
2179
2180 /* on successful connect, the msk state will be moved to established by
2181 * subflow_finish_connect()
2182 */
2183 if (!err || err == -EINPROGRESS)
2184 mptcp_copy_inaddrs(sock->sk, ssock->sk);
2185 else
2186 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
2187
2188unlock:
2189 release_sock(sock->sk);
2190 return err;
2191}
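
/* A minimal userspace sketch of the connect path above; the address
 * and port are placeholders, and IPPROTO_MPTCP (262) is defined
 * locally in case the installed headers predate it. If the peer or a
 * middlebox strips MP_CAPABLE, the same fd transparently continues as
 * plain TCP via the fallback handling above.
 */
#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* include/uapi/linux/in.h */
#endif

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(8080) };
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr);

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0) {
		perror("socket");	/* e.g. CONFIG_MPTCP not enabled */
		return 1;
	}
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}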
2192
1846static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
1847 int peer)
1848{
1849 if (sock->sk->sk_prot == &tcp_prot) {
1850 /* we are being invoked from __sys_accept4, after
1851 * mptcp_accept() has just accepted a non-mp-capable
1852 * flow: sk is a tcp_sk, not an mptcp one.
1853 *
1854 * Hand the socket over to tcp so all further socket ops
1855 * bypass mptcp.
1856 */
1857 sock->ops = &inet_stream_ops;
1858 }
1859
1860 return inet_getname(sock, uaddr, peer);
1861}
1862
1863#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1864static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
1865 int peer)
1866{
1867 if (sock->sk->sk_prot == &tcpv6_prot) {
1868 /* we are being invoked from __sys_accept4 after
1869 * mptcp_accept() has accepted a non-mp-capable
1870 * subflow: sk is a tcp_sk, not mptcp.
1871 *
1872 * Hand the socket over to tcp so all further
1873 * socket ops bypass mptcp.
1874 */
1875 sock->ops = &inet6_stream_ops;
1876 }
1877
1878 return inet6_getname(sock, uaddr, peer);
1879}
1880#endif
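
/* Note that both wrappers above only redo the ops handover on every
 * getname() call; once mptcp_is_tcpsk() performs that swap a single
 * time at accept(), the plain inet_getname()/inet6_getname() entries
 * in the proto_ops tables below suffice.
 */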
1881
1882static int mptcp_listen(struct socket *sock, int backlog)
1883{
1884 struct mptcp_sock *msk = mptcp_sk(sock->sk);
1885 struct socket *ssock;
1886 int err;
1887
1888 pr_debug("msk=%p", msk);
1889
1890 lock_sock(sock->sk);
2193static int mptcp_listen(struct socket *sock, int backlog)
2194{
2195 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2196 struct socket *ssock;
2197 int err;
2198
2199 pr_debug("msk=%p", msk);
2200
2201 lock_sock(sock->sk);
1891 ssock = __mptcp_socket_create(msk, TCP_LISTEN);
1892 if (IS_ERR(ssock)) {
1893 err = PTR_ERR(ssock);
2202 ssock = __mptcp_nmpc_socket(msk);
2203 if (!ssock) {
2204 err = -EINVAL;
1894 goto unlock;
1895 }
1896
2205 goto unlock;
2206 }
2207
2208 mptcp_token_destroy(msk);
2209 inet_sk_state_store(sock->sk, TCP_LISTEN);
1897 sock_set_flag(sock->sk, SOCK_RCU_FREE);
1898
1899 err = ssock->ops->listen(ssock, backlog);
1900 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
1901 if (!err)
1902 mptcp_copy_inaddrs(sock->sk, ssock->sk);
1903
1904unlock:
1905 release_sock(sock->sk);
1906 return err;
1907}
1908
2210 sock_set_flag(sock->sk, SOCK_RCU_FREE);
2211
2212 err = ssock->ops->listen(ssock, backlog);
2213 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
2214 if (!err)
2215 mptcp_copy_inaddrs(sock->sk, ssock->sk);
2216
2217unlock:
2218 release_sock(sock->sk);
2219 return err;
2220}
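
/* The listener-side counterpart of the client sketch above: bind() and
 * listen() both run through the first subflow, as shown. The port and
 * backlog are placeholders.
 */
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static int mptcp_listen_fd(unsigned short port)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(port),
				    .sin_addr.s_addr = htonl(INADDR_ANY) };
	int one = 1, fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* accept() then yields one fd per connection */
}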
2221
1909static bool is_tcp_proto(const struct proto *p)
1910{
1911#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1912 return p == &tcp_prot || p == &tcpv6_prot;
1913#else
1914 return p == &tcp_prot;
1915#endif
1916}
1917
1918static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
1919 int flags, bool kern)
1920{
1921 struct mptcp_sock *msk = mptcp_sk(sock->sk);
1922 struct socket *ssock;
1923 int err;
1924
1925 pr_debug("msk=%p", msk);
1926
1927 lock_sock(sock->sk);
1928 if (sock->sk->sk_state != TCP_LISTEN)
1929 goto unlock_fail;
1930
1931 ssock = __mptcp_nmpc_socket(msk);
1932 if (!ssock)
1933 goto unlock_fail;
1934
2222static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
2223 int flags, bool kern)
2224{
2225 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2226 struct socket *ssock;
2227 int err;
2228
2229 pr_debug("msk=%p", msk);
2230
2231 lock_sock(sock->sk);
2232 if (sock->sk->sk_state != TCP_LISTEN)
2233 goto unlock_fail;
2234
2235 ssock = __mptcp_nmpc_socket(msk);
2236 if (!ssock)
2237 goto unlock_fail;
2238
2239 clear_bit(MPTCP_DATA_READY, &msk->flags);
1935 sock_hold(ssock->sk);
1936 release_sock(sock->sk);
1937
1938 err = ssock->ops->accept(sock, newsock, flags, kern);
2240 sock_hold(ssock->sk);
2241 release_sock(sock->sk);
2242
2243 err = ssock->ops->accept(sock, newsock, flags, kern);
1939 if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
2244 if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
1940 struct mptcp_sock *msk = mptcp_sk(newsock->sk);
1941 struct mptcp_subflow_context *subflow;
1942
1943	/* set ssk->sk_socket of accept()ed flows to the mptcp socket.
1944	 * This is needed so the NOSPACE flag can be set from the tcp stack.
1945	 */
1946 __mptcp_flush_join_list(msk);
2245 struct mptcp_sock *msk = mptcp_sk(newsock->sk);
2246 struct mptcp_subflow_context *subflow;
2247
2248	/* set ssk->sk_socket of accept()ed flows to the mptcp socket.
2249	 * This is needed so the NOSPACE flag can be set from the tcp stack.
2250	 */
2251 __mptcp_flush_join_list(msk);
1947 list_for_each_entry(subflow, &msk->conn_list, node) {
2252 mptcp_for_each_subflow(msk, subflow) {
1948 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1949
1950 if (!ssk->sk_socket)
1951 mptcp_sock_graft(ssk, newsock);
1952 }
1953 }
1954
2253 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2254
2255 if (!ssk->sk_socket)
2256 mptcp_sock_graft(ssk, newsock);
2257 }
2258 }
2259
2260 if (inet_csk_listen_poll(ssock->sk))
2261 set_bit(MPTCP_DATA_READY, &msk->flags);
1955 sock_put(ssock->sk);
1956 return err;
1957
1958unlock_fail:
1959 release_sock(sock->sk);
1960 return -EINVAL;
1961}
1962
2262 sock_put(ssock->sk);
2263 return err;
2264
2265unlock_fail:
2266 release_sock(sock->sk);
2267 return -EINVAL;
2268}
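
/* The MPTCP_DATA_READY bit is cleared before calling into the
 * subflow's accept() and re-armed afterwards if inet_csk_listen_poll()
 * still sees queued requests, keeping mptcp_poll()'s EPOLLIN
 * level-triggered while the accept backlog stays non-empty.
 */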
2269
2270static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
2271{
2272 return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
2273 0;
2274}
2275
1963static __poll_t mptcp_poll(struct file *file, struct socket *sock,
1964 struct poll_table_struct *wait)
1965{
1966 struct sock *sk = sock->sk;
1967 struct mptcp_sock *msk;
2276static __poll_t mptcp_poll(struct file *file, struct socket *sock,
2277 struct poll_table_struct *wait)
2278{
2279 struct sock *sk = sock->sk;
2280 struct mptcp_sock *msk;
1968 struct socket *ssock;
1969 __poll_t mask = 0;
2281 __poll_t mask = 0;
2282 int state;
1970
1971 msk = mptcp_sk(sk);
2283
2284 msk = mptcp_sk(sk);
1972 lock_sock(sk);
1973 ssock = __mptcp_tcp_fallback(msk);
1974 if (!ssock)
1975 ssock = __mptcp_nmpc_socket(msk);
1976 if (ssock) {
1977 mask = ssock->ops->poll(file, ssock, wait);
1978 release_sock(sk);
1979 return mask;
1980 }
1981
1982 release_sock(sk);
1983 sock_poll_wait(file, sock, wait);
2285 sock_poll_wait(file, sock, wait);
1984 lock_sock(sk);
1985
2286
1986 if (test_bit(MPTCP_DATA_READY, &msk->flags))
1987 mask = EPOLLIN | EPOLLRDNORM;
1988 if (sk_stream_is_writeable(sk) &&
1989 test_bit(MPTCP_SEND_SPACE, &msk->flags))
1990 mask |= EPOLLOUT | EPOLLWRNORM;
2287 state = inet_sk_state_load(sk);
2288 if (state == TCP_LISTEN)
2289 return mptcp_check_readable(msk);
2290
2291 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
2292 mask |= mptcp_check_readable(msk);
2293 if (sk_stream_is_writeable(sk) &&
2294 test_bit(MPTCP_SEND_SPACE, &msk->flags))
2295 mask |= EPOLLOUT | EPOLLWRNORM;
2296 }
1991 if (sk->sk_shutdown & RCV_SHUTDOWN)
1992 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1993
2297 if (sk->sk_shutdown & RCV_SHUTDOWN)
2298 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2299
1994 release_sock(sk);
1995
1996 return mask;
1997}
1998
1999static int mptcp_shutdown(struct socket *sock, int how)
2000{
2001 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2002 struct mptcp_subflow_context *subflow;
2300 return mask;
2301}
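
/* Readiness as seen from userspace, assuming fd is an MPTCP socket:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// MPTCP-level data ready (MPTCP_DATA_READY)
 *
 * While the handshake is in SYN_SENT/SYN_RECV neither POLLIN nor
 * POLLOUT is reported; only the shutdown/EPOLLRDHUP bits can show up.
 */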
2302
2303static int mptcp_shutdown(struct socket *sock, int how)
2304{
2305 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2306 struct mptcp_subflow_context *subflow;
2003 struct socket *ssock;
2004 int ret = 0;
2005
2006	pr_debug("msk=%p, how=%d", msk, how);
2007
2008 lock_sock(sock->sk);
2307 int ret = 0;
2308
2309	pr_debug("msk=%p, how=%d", msk, how);
2310
2311 lock_sock(sock->sk);
2009 ssock = __mptcp_tcp_fallback(msk);
2010 if (ssock) {
2011 release_sock(sock->sk);
2012 return inet_shutdown(ssock, how);
2013 }
2014
2312
2015 if (how == SHUT_WR || how == SHUT_RDWR)
2016 inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
2017
2018 how++;
2313 how++;
2019
2020 if ((how & ~SHUTDOWN_MASK) || !how) {
2021 ret = -EINVAL;
2022 goto out_unlock;
2023 }
2024
2025 if (sock->state == SS_CONNECTING) {
2026 if ((1 << sock->sk->sk_state) &
2027 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
2028 sock->state = SS_DISCONNECTING;
2029 else
2030 sock->state = SS_CONNECTED;
2031 }
2032
2314 if ((how & ~SHUTDOWN_MASK) || !how) {
2315 ret = -EINVAL;
2316 goto out_unlock;
2317 }
2318
2319 if (sock->state == SS_CONNECTING) {
2320 if ((1 << sock->sk->sk_state) &
2321 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
2322 sock->state = SS_DISCONNECTING;
2323 else
2324 sock->state = SS_CONNECTED;
2325 }
2326
2033 __mptcp_flush_join_list(msk);
2034 mptcp_for_each_subflow(msk, subflow) {
2035 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2327 /* If we've already sent a FIN, or it's a closed state, skip this. */
2328 if (__mptcp_check_fallback(msk)) {
2329 if (how == SHUT_WR || how == SHUT_RDWR)
2330 inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
2036
2331
2037 mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
2332 mptcp_for_each_subflow(msk, subflow) {
2333 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2334
2335 mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2336 }
2337 } else if ((how & SEND_SHUTDOWN) &&
2338 ((1 << sock->sk->sk_state) &
2339 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2340 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
2341 mptcp_close_state(sock->sk)) {
2342 __mptcp_flush_join_list(msk);
2343
2344 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2345 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2346
2347 mptcp_for_each_subflow(msk, subflow) {
2348 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2349
2350 mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2351 }
2038 }
2039
2352 }
2353
2354 /* Wake up anyone sleeping in poll. */
2355 sock->sk->sk_state_change(sock->sk);
2356
2040out_unlock:
2041 release_sock(sock->sk);
2042
2043 return ret;
2044}
2045
2046static const struct proto_ops mptcp_stream_ops = {
2047 .family = PF_INET,
2048 .owner = THIS_MODULE,
2049 .release = inet_release,
2050 .bind = mptcp_bind,
2051 .connect = mptcp_stream_connect,
2052 .socketpair = sock_no_socketpair,
2053 .accept = mptcp_stream_accept,
2357out_unlock:
2358 release_sock(sock->sk);
2359
2360 return ret;
2361}
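
/* A typical half-close against this path: shutdown(fd, SHUT_WR) bumps
 * write_seq and sets snd_data_fin_enable, so a DATA_FIN goes out at
 * the MPTCP level and every subflow is shut down for writing, while
 * the fd can still drain whatever the peer has in flight.
 */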
2362
2363static const struct proto_ops mptcp_stream_ops = {
2364 .family = PF_INET,
2365 .owner = THIS_MODULE,
2366 .release = inet_release,
2367 .bind = mptcp_bind,
2368 .connect = mptcp_stream_connect,
2369 .socketpair = sock_no_socketpair,
2370 .accept = mptcp_stream_accept,
2054 .getname = mptcp_v4_getname,
2371 .getname = inet_getname,
2055 .poll = mptcp_poll,
2056 .ioctl = inet_ioctl,
2057 .gettstamp = sock_gettstamp,
2058 .listen = mptcp_listen,
2059 .shutdown = mptcp_shutdown,
2060 .setsockopt = sock_common_setsockopt,
2061 .getsockopt = sock_common_getsockopt,
2062 .sendmsg = inet_sendmsg,
2063 .recvmsg = inet_recvmsg,
2064 .mmap = sock_no_mmap,
2065 .sendpage = inet_sendpage,
2372 .poll = mptcp_poll,
2373 .ioctl = inet_ioctl,
2374 .gettstamp = sock_gettstamp,
2375 .listen = mptcp_listen,
2376 .shutdown = mptcp_shutdown,
2377 .setsockopt = sock_common_setsockopt,
2378 .getsockopt = sock_common_getsockopt,
2379 .sendmsg = inet_sendmsg,
2380 .recvmsg = inet_recvmsg,
2381 .mmap = sock_no_mmap,
2382 .sendpage = inet_sendpage,
2066#ifdef CONFIG_COMPAT
2067 .compat_setsockopt = compat_sock_common_setsockopt,
2068 .compat_getsockopt = compat_sock_common_getsockopt,
2069#endif
2070};
2071
2072static struct inet_protosw mptcp_protosw = {
2073 .type = SOCK_STREAM,
2074 .protocol = IPPROTO_MPTCP,
2075 .prot = &mptcp_prot,
2076 .ops = &mptcp_stream_ops,
2077 .flags = INET_PROTOSW_ICSK,
2078};
2079
2383};
2384
2385static struct inet_protosw mptcp_protosw = {
2386 .type = SOCK_STREAM,
2387 .protocol = IPPROTO_MPTCP,
2388 .prot = &mptcp_prot,
2389 .ops = &mptcp_stream_ops,
2390 .flags = INET_PROTOSW_ICSK,
2391};
2392
2080void mptcp_proto_init(void)
2393void __init mptcp_proto_init(void)
2081{
2082 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
2083
2084 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
2085 panic("Failed to allocate MPTCP pcpu counter\n");
2086
2087 mptcp_subflow_init();
2088 mptcp_pm_init();
2394{
2395 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
2396
2397 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
2398 panic("Failed to allocate MPTCP pcpu counter\n");
2399
2400 mptcp_subflow_init();
2401 mptcp_pm_init();
2402 mptcp_token_init();
2089
2090 if (proto_register(&mptcp_prot, 1) != 0)
2091 panic("Failed to register MPTCP proto.\n");
2092
2093 inet_register_protosw(&mptcp_protosw);
2094
2095 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
2096}
2097
2098#if IS_ENABLED(CONFIG_MPTCP_IPV6)
2099static const struct proto_ops mptcp_v6_stream_ops = {
2100 .family = PF_INET6,
2101 .owner = THIS_MODULE,
2102 .release = inet6_release,
2103 .bind = mptcp_bind,
2104 .connect = mptcp_stream_connect,
2105 .socketpair = sock_no_socketpair,
2106 .accept = mptcp_stream_accept,
2403
2404 if (proto_register(&mptcp_prot, 1) != 0)
2405 panic("Failed to register MPTCP proto.\n");
2406
2407 inet_register_protosw(&mptcp_protosw);
2408
2409 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
2410}
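
/* The BUILD_BUG_ON above is a compile-time guard: mptcp_skb_cb lives
 * in the 48-byte skb->cb[] control buffer shared with TCP, so growing
 * it past sizeof_field(struct sk_buff, cb) must break the build rather
 * than corrupt adjacent skb state at run time.
 */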
2411
2412#if IS_ENABLED(CONFIG_MPTCP_IPV6)
2413static const struct proto_ops mptcp_v6_stream_ops = {
2414 .family = PF_INET6,
2415 .owner = THIS_MODULE,
2416 .release = inet6_release,
2417 .bind = mptcp_bind,
2418 .connect = mptcp_stream_connect,
2419 .socketpair = sock_no_socketpair,
2420 .accept = mptcp_stream_accept,
2107 .getname = mptcp_v6_getname,
2421 .getname = inet6_getname,
2108 .poll = mptcp_poll,
2109 .ioctl = inet6_ioctl,
2110 .gettstamp = sock_gettstamp,
2111 .listen = mptcp_listen,
2112 .shutdown = mptcp_shutdown,
2113 .setsockopt = sock_common_setsockopt,
2114 .getsockopt = sock_common_getsockopt,
2115 .sendmsg = inet6_sendmsg,
2116 .recvmsg = inet6_recvmsg,
2117 .mmap = sock_no_mmap,
2118 .sendpage = inet_sendpage,
2119#ifdef CONFIG_COMPAT
2120 .compat_ioctl = inet6_compat_ioctl,
2422 .poll = mptcp_poll,
2423 .ioctl = inet6_ioctl,
2424 .gettstamp = sock_gettstamp,
2425 .listen = mptcp_listen,
2426 .shutdown = mptcp_shutdown,
2427 .setsockopt = sock_common_setsockopt,
2428 .getsockopt = sock_common_getsockopt,
2429 .sendmsg = inet6_sendmsg,
2430 .recvmsg = inet6_recvmsg,
2431 .mmap = sock_no_mmap,
2432 .sendpage = inet_sendpage,
2433#ifdef CONFIG_COMPAT
2434 .compat_ioctl = inet6_compat_ioctl,
2121 .compat_setsockopt = compat_sock_common_setsockopt,
2122 .compat_getsockopt = compat_sock_common_getsockopt,
2123#endif
2124};
2125
2126static struct proto mptcp_v6_prot;
2127
2128static void mptcp_v6_destroy(struct sock *sk)
2129{
2130 mptcp_destroy(sk);
2131 inet6_destroy_sock(sk);
2132}
2133
2134static struct inet_protosw mptcp_v6_protosw = {
2135 .type = SOCK_STREAM,
2136 .protocol = IPPROTO_MPTCP,
2137 .prot = &mptcp_v6_prot,
2138 .ops = &mptcp_v6_stream_ops,
2139 .flags = INET_PROTOSW_ICSK,
2140};
2141
2435#endif
2436};
2437
2438static struct proto mptcp_v6_prot;
2439
2440static void mptcp_v6_destroy(struct sock *sk)
2441{
2442 mptcp_destroy(sk);
2443 inet6_destroy_sock(sk);
2444}
2445
2446static struct inet_protosw mptcp_v6_protosw = {
2447 .type = SOCK_STREAM,
2448 .protocol = IPPROTO_MPTCP,
2449 .prot = &mptcp_v6_prot,
2450 .ops = &mptcp_v6_stream_ops,
2451 .flags = INET_PROTOSW_ICSK,
2452};
2453
2142int mptcp_proto_v6_init(void)
2454int __init mptcp_proto_v6_init(void)
2143{
2144 int err;
2145
2146 mptcp_v6_prot = mptcp_prot;
2147 strcpy(mptcp_v6_prot.name, "MPTCPv6");
2148 mptcp_v6_prot.slab = NULL;
2149 mptcp_v6_prot.destroy = mptcp_v6_destroy;
2150 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

--- 12 unchanged lines hidden ---
2455{
2456 int err;
2457
2458 mptcp_v6_prot = mptcp_prot;
2459 strcpy(mptcp_v6_prot.name, "MPTCPv6");
2460 mptcp_v6_prot.slab = NULL;
2461 mptcp_v6_prot.destroy = mptcp_v6_destroy;
2462 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

--- 12 unchanged lines hidden ---