net/mptcp/subflow.c: diff between commits 9c899aa6ac6ba1e28feac82871d44af0b0e7e05c ("-" lines) and 40947e13997a1cba4e875893ca6e5d5e61a0689d ("+" lines)
 // SPDX-License-Identifier: GPL-2.0
 /* Multipath TCP
  *
  * Copyright (c) 2017 - 2019, Intel Corporation.
  */
 
 #define pr_fmt(fmt) "MPTCP: " fmt
 
--- 4 unchanged lines hidden ---
 #include <crypto/sha2.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/ip6_route.h>
+#include <net/transp_v6.h>
 #endif
 #include <net/mptcp.h>
 #include <uapi/linux/mptcp.h>
 #include "protocol.h"
 #include "mib.h"
 
+static void mptcp_subflow_ops_undo_override(struct sock *ssk);
+
 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                   enum linux_mptcp_mib_field field)
 {
         MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
 }
 
 static void subflow_req_destructor(struct request_sock *req)
 {
--- 21 unchanged lines hidden ---
 
 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
 {
         return mptcp_is_fully_established((void *)msk) &&
                READ_ONCE(msk->pm.accept_subflow);
 }
 
 /* validate received token and create truncated hmac and nonce for SYN-ACK */
-static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
-                                                     const struct sk_buff *skb)
+static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
 {
-        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+        struct mptcp_sock *msk = subflow_req->msk;
         u8 hmac[SHA256_DIGEST_SIZE];
+
+        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
+
+        subflow_generate_hmac(msk->local_key, msk->remote_key,
+                              subflow_req->local_nonce,
+                              subflow_req->remote_nonce, hmac);
+
+        subflow_req->thmac = get_unaligned_be64(hmac);
+}
+
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
+{
+        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
         struct mptcp_sock *msk;
         int local_id;
 
         msk = mptcp_token_get_sock(subflow_req->token);
         if (!msk) {
                 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                 return NULL;
         }
 
         local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
         if (local_id < 0) {
                 sock_put((struct sock *)msk);
                 return NULL;
         }
         subflow_req->local_id = local_id;
 
-        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
-
-        subflow_generate_hmac(msk->local_key, msk->remote_key,
-                              subflow_req->local_nonce,
-                              subflow_req->remote_nonce, hmac);
-
-        subflow_req->thmac = get_unaligned_be64(hmac);
         return msk;
 }
 
-static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
+static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
 {
         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
 
         subflow_req->mp_capable = 0;
         subflow_req->mp_join = 0;
         subflow_req->msk = NULL;
         mptcp_token_init_request(req);
+
+#ifdef CONFIG_TCP_MD5SIG
+        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+         * TCP option space.
+         */
+        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
+                return -EINVAL;
+#endif
+
+        return 0;
 }
 
+static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
+{
+        return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
+}
+
 /* Init mptcp request socket.
  *
  * Returns an error code if a JOIN has failed and a TCP reset
  * should be sent.
  */
-static int subflow_check_req(struct request_sock *req,
-                             const struct sock *sk_listener,
-                             struct sk_buff *skb)
+static int subflow_init_req(struct request_sock *req,
+                            const struct sock *sk_listener,
+                            struct sk_buff *skb)
 {
         struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
         struct mptcp_options_received mp_opt;
+        int ret;
 
         pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
 
-#ifdef CONFIG_TCP_MD5SIG
-        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
-         * TCP option space.
-         */
-        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
-                return -EINVAL;
-#endif
+        ret = __subflow_init_req(req, sk_listener);
+        if (ret)
+                return 0;
 
         mptcp_get_options(skb, &mp_opt);
 
         if (mp_opt.mp_capable) {
                 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
 
                 if (mp_opt.mp_join)
                         return 0;
--- 31 unchanged lines hidden ---
 
         } else if (mp_opt.mp_join && listener->request_mptcp) {
                 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                 subflow_req->mp_join = 1;
                 subflow_req->backup = mp_opt.backup;
                 subflow_req->remote_id = mp_opt.join_id;
                 subflow_req->token = mp_opt.token;
                 subflow_req->remote_nonce = mp_opt.nonce;
-                subflow_req->msk = subflow_token_join_request(req, skb);
+                subflow_req->msk = subflow_token_join_request(req);
 
                 /* Can't fall back to TCP in this case. */
                 if (!subflow_req->msk)
                         return -EPERM;
 
+                if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+                        pr_debug("syn inet_sport=%d %d",
+                                 ntohs(inet_sk(sk_listener)->inet_sport),
+                                 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+                        if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+                                sock_put((struct sock *)subflow_req->msk);
+                                mptcp_token_destroy_request(req);
+                                tcp_request_sock_ops.destructor(req);
+                                subflow_req->msk = NULL;
+                                subflow_req->mp_join = 0;
+                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
+                                return -EPERM;
+                        }
+                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
+                }
+
+                subflow_req_create_thmac(subflow_req);
+
                 if (unlikely(req->syncookie)) {
                         if (mptcp_can_accept_new_subflow(subflow_req->msk))
                                 subflow_init_req_cookie_join_save(subflow_req, skb);
                 }
 
                 pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
                          subflow_req->remote_nonce, subflow_req->msk);
         }
--- 5 unchanged lines hidden ---
                                   const struct sock *sk_listener,
                                   struct sk_buff *skb)
 {
         struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
         struct mptcp_options_received mp_opt;
         int err;
 
-        subflow_init_req(req, sk_listener);
+        err = __subflow_init_req(req, sk_listener);
+        if (err)
+                return err;
+
         mptcp_get_options(skb, &mp_opt);
 
         if (mp_opt.mp_capable && mp_opt.mp_join)
                 return -EINVAL;
 
         if (mp_opt.mp_capable && listener->request_mptcp) {
                 if (mp_opt.sndr_key == 0)
                         return -EINVAL;
--- 23 unchanged lines hidden ---
                                                struct sk_buff *skb,
                                                struct flowi *fl,
                                                struct request_sock *req)
 {
         struct dst_entry *dst;
         int err;
 
         tcp_rsk(req)->is_mptcp = 1;
-        subflow_init_req(req, sk);
 
         dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
         if (!dst)
                 return NULL;
 
-        err = subflow_check_req(req, sk, skb);
+        err = subflow_init_req(req, sk, skb);
         if (err == 0)
                 return dst;
 
         dst_release(dst);
         if (!req->syncookie)
                 tcp_request_sock_ops.send_reset(sk, skb);
         return NULL;
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
                                               struct sk_buff *skb,
                                               struct flowi *fl,
                                               struct request_sock *req)
 {
         struct dst_entry *dst;
         int err;
 
         tcp_rsk(req)->is_mptcp = 1;
-        subflow_init_req(req, sk);
 
         dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
         if (!dst)
                 return NULL;
 
-        err = subflow_check_req(req, sk, skb);
+        err = subflow_init_req(req, sk, skb);
         if (err == 0)
                 return dst;
 
         dst_release(dst);
         if (!req->syncookie)
                 tcp6_request_sock_ops.send_reset(sk, skb);
         return NULL;
 }
--- 31 unchanged lines hidden ---
         tcp_done(ssk);
         if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
             schedule_work(&mptcp_sk(sk)->work))
                 return; /* worker will put sk for us */
 
         sock_put(sk);
 }
 
+static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
+{
+        return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
+}
+
 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
         struct mptcp_options_received mp_opt;
         struct sock *parent = subflow->conn;
 
         subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
 
         if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                 inet_sk_state_store(parent, TCP_ESTABLISHED);
                 parent->sk_state_change(parent);
         }
 
         /* be sure no special action on any packet other than syn-ack */
         if (subflow->conn_finished)
                 return;
 
+        mptcp_propagate_sndbuf(parent, sk);
         subflow->rel_write_seq = 1;
         subflow->conn_finished = 1;
         subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
         pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
 
         mptcp_get_options(skb, &mp_opt);
         if (subflow->request_mptcp) {
                 if (!mp_opt.mp_capable) {
--- 32 unchanged lines hidden ---
                                       hmac);
                 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
 
                 if (!mptcp_finish_join(sk))
                         goto do_reset;
 
                 subflow->mp_join = 1;
                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+
+                if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
+                        pr_debug("synack inet_dport=%d %d",
+                                 ntohs(inet_sk(sk)->inet_dport),
+                                 ntohs(inet_sk(parent)->inet_dport));
+                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+                }
         } else if (mptcp_check_fallback(sk)) {
 fallback:
                 mptcp_rcv_space_init(mptcp_sk(parent), sk);
         }
         return;
 
 do_reset:
         mptcp_subflow_reset(sk);
--- 20 unchanged lines hidden ---
         tcp_listendrop(sk);
         return 0;
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
 static struct inet_connection_sock_af_ops subflow_v6_specific;
 static struct inet_connection_sock_af_ops subflow_v6m_specific;
+static struct proto tcpv6_prot_override;
 
 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
 
         pr_debug("subflow=%p", subflow);
 
         if (skb->protocol == htons(ETH_P_IP))
--- 65 unchanged lines hidden ---
                                         struct mptcp_subflow_context *old_ctx)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
 
         mptcp_subflow_tcp_fallback(sk, old_ctx);
         icsk->icsk_ulp_ops = NULL;
         rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
         tcp_sk(sk)->is_mptcp = 0;
+
+        mptcp_subflow_ops_undo_override(sk);
 }
 
 static void subflow_drop_ctx(struct sock *ssk)
 {
         struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
 
         if (!ctx)
                 return;
--- 129 unchanged lines hidden ---
                         /* move the msk reference ownership to the subflow */
                         subflow_req->msk = NULL;
                         ctx->conn = (struct sock *)owner;
                         if (!mptcp_finish_join(child))
                                 goto dispose_child;
 
                         SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                         tcp_rsk(req)->drop_req = true;
+
+                        if (subflow_use_different_sport(owner, sk)) {
+                                pr_debug("ack inet_sport=%d %d",
+                                         ntohs(inet_sk(sk)->inet_sport),
+                                         ntohs(inet_sk((struct sock *)owner)->inet_sport));
+                                if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+                                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
+                                        goto out;
+                                }
+                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
+                        }
                 }
         }
 
 out:
         /* dispose of the left over mptcp master, if any */
         if (unlikely(new_msk))
                 mptcp_force_close(new_msk);
 
--- 12 unchanged lines hidden ---
         tcp_done(child);
         req->rsk_ops->send_reset(sk, skb);
 
         /* The last child reference will be released by the caller */
         return child;
 }
 
 static struct inet_connection_sock_af_ops subflow_specific;
+static struct proto tcp_prot_override;
 
 enum mapping_status {
         MAPPING_OK,
         MAPPING_INVALID,
         MAPPING_EMPTY,
         MAPPING_DATA_FIN,
         MAPPING_DUMMY
 };
--- 197 unchanged lines hidden ---
         MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
         tcp_sk(ssk)->copied_seq += incr;
         if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
                 sk_eat_skb(ssk, skb);
         if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                 subflow->map_valid = 0;
 }
 
+/* sched mptcp worker to remove the subflow if no more data is pending */
+static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+{
+        struct sock *sk = (struct sock *)msk;
+
+        if (likely(ssk->sk_state != TCP_CLOSE))
+                return;
+
+        if (skb_queue_empty(&ssk->sk_receive_queue) &&
+            !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
+                sock_hold(sk);
+                if (!schedule_work(&msk->work))
+                        sock_put(sk);
+        }
+}
+
 static bool subflow_check_data_avail(struct sock *ssk)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
         enum mapping_status status;
         struct mptcp_sock *msk;
         struct sk_buff *skb;
 
         pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
--- 22 unchanged lines hidden ---
                         subflow->map_data_len = skb->len;
                         subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
                                                    subflow->ssn_offset;
                         subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
                         return true;
                 }
 
                 if (status != MAPPING_OK)
-                        return false;
+                        goto no_data;
 
                 skb = skb_peek(&ssk->sk_receive_queue);
                 if (WARN_ON_ONCE(!skb))
-                        return false;
+                        goto no_data;
 
                 /* if msk lacks the remote key, this subflow must provide an
                  * MP_CAPABLE-based mapping
                  */
                 if (unlikely(!READ_ONCE(msk->can_ack))) {
                         if (!subflow->mpc_map) {
                                 ssk->sk_err = EBADMSG;
                                 goto fatal;
--- 17 unchanged lines hidden ---
 
                 /* only accept in-sequence mapping. Old values are spurious
                  * retransmission
                  */
                 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
         }
         return true;
 
+no_data:
+        subflow_sched_work_if_closed(msk, ssk);
+        return false;
 fatal:
         /* fatal protocol error, close the socket */
         /* This barrier is coupled with smp_rmb() in tcp_poll() */
         smp_wmb();
         ssk->sk_error_report(ssk);
         tcp_set_state(ssk, TCP_CLOSE);
         tcp_send_active_reset(ssk, GFP_ATOMIC);
         subflow->data_avail = 0;
--- 54 unchanged lines hidden ---
                      !subflow->mp_join && !(state & TCPF_CLOSE));
 
         if (mptcp_subflow_data_available(sk))
                 mptcp_data_ready(parent, sk);
 }
 
 static void subflow_write_space(struct sock *ssk)
 {
-        /* we take action in __mptcp_clean_una() */
-}
-
-void __mptcp_error_report(struct sock *sk)
-{
-        struct mptcp_subflow_context *subflow;
-        struct mptcp_sock *msk = mptcp_sk(sk);
-
-        mptcp_for_each_subflow(msk, subflow) {
-                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-                int err = sock_error(ssk);
-
-                if (!err)
-                        continue;
-
-                /* only propagate errors on fallen-back sockets or
-                 * on MPC connect
-                 */
-                if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
-                        continue;
-
-                inet_sk_state_store(sk, inet_sk_state_load(ssk));
-                sk->sk_err = -err;
-
-                /* This barrier is coupled with smp_rmb() in mptcp_poll() */
-                smp_wmb();
-                sk->sk_error_report(sk);
-                break;
-        }
-}
-
-static void subflow_error_report(struct sock *ssk)
-{
         struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
 
-        mptcp_data_lock(sk);
-        if (!sock_owned_by_user(sk))
-                __mptcp_error_report(sk);
-        else
-                set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
-        mptcp_data_unlock(sk);
+        mptcp_propagate_sndbuf(sk, ssk);
+        mptcp_write_space(sk);
 }
 
 static struct inet_connection_sock_af_ops *
 subflow_default_af_ops(struct sock *sk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
         if (sk->sk_family == AF_INET6)
                 return &subflow_v6_specific;
--- 16 unchanged lines hidden ---
         if (likely(icsk->icsk_af_ops == target))
                 return;
 
         subflow->icsk_af_ops = icsk->icsk_af_ops;
         icsk->icsk_af_ops = target;
 }
 #endif
 
-static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
-                                struct sockaddr_storage *addr)
+void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
+                         struct sockaddr_storage *addr,
+                         unsigned short family)
 {
         memset(addr, 0, sizeof(*addr));
-        addr->ss_family = info->family;
+        addr->ss_family = family;
         if (addr->ss_family == AF_INET) {
                 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
 
-                in_addr->sin_addr = info->addr;
+                if (info->family == AF_INET)
+                        in_addr->sin_addr = info->addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+                else if (ipv6_addr_v4mapped(&info->addr6))
+                        in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
+#endif
                 in_addr->sin_port = info->port;
         }
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
         else if (addr->ss_family == AF_INET6) {
                 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
 
-                in6_addr->sin6_addr = info->addr6;
+                if (info->family == AF_INET)
+                        ipv6_addr_set_v4mapped(info->addr.s_addr,
+                                               &in6_addr->sin6_addr);
+                else
+                        in6_addr->sin6_addr = info->addr6;
                 in6_addr->sin6_port = info->port;
         }
 #endif
 }
 
 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
                             const struct mptcp_addr_info *remote)
 {
--- 27 unchanged lines hidden ---
                         goto failed;
 
                 local_id = err;
         }
 
         subflow->remote_key = msk->remote_key;
         subflow->local_key = msk->local_key;
         subflow->token = msk->token;
-        mptcp_info2sockaddr(loc, &addr);
+        mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
 
         addrlen = sizeof(struct sockaddr_in);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-        if (loc->family == AF_INET6)
+        if (addr.ss_family == AF_INET6)
                 addrlen = sizeof(struct sockaddr_in6);
 #endif
         ssk->sk_bound_dev_if = loc->ifindex;
         err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
         if (err)
                 goto failed;
 
         mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
         pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
                  remote_token, local_id, remote_id);
         subflow->remote_token = remote_token;
         subflow->local_id = local_id;
         subflow->remote_id = remote_id;
         subflow->request_join = 1;
         subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
-        mptcp_info2sockaddr(remote, &addr);
+        mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
 
         mptcp_add_pending_subflow(msk, subflow);
         err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
         if (err && err != -EINPROGRESS)
                 goto failed_unlink;
 
+        /* discard the subflow socket */
+        mptcp_sock_graft(ssk, sk->sk_socket);
+        iput(SOCK_INODE(sf));
         return err;
 
 failed_unlink:
         spin_lock_bh(&msk->join_list_lock);
         list_del(&subflow->node);
         spin_unlock_bh(&msk->join_list_lock);
 
 failed:
--- 21 unchanged lines hidden ---
 
                 cgroup_sk_free(child_skcd);
                 *child_skcd = *parent_skcd;
                 cgroup_sk_clone(child_skcd);
         }
 #endif /* CONFIG_SOCK_CGROUP_DATA */
 }
 
+static void mptcp_subflow_ops_override(struct sock *ssk)
+{
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+        if (ssk->sk_prot == &tcpv6_prot)
+                ssk->sk_prot = &tcpv6_prot_override;
+        else
+#endif
+                ssk->sk_prot = &tcp_prot_override;
+}
+
+static void mptcp_subflow_ops_undo_override(struct sock *ssk)
+{
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+        if (ssk->sk_prot == &tcpv6_prot_override)
+                ssk->sk_prot = &tcpv6_prot;
+        else
+#endif
+                ssk->sk_prot = &tcp_prot;
+}
+
 int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
 {
         struct mptcp_subflow_context *subflow;
         struct net *net = sock_net(sk);
         struct socket *sf;
         int err;
 
         /* un-accepted server sockets can reach here - on bad configuration
--- 39 unchanged lines hidden ---
         SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
 
         subflow = mptcp_subflow_ctx(sf->sk);
         pr_debug("subflow=%p", subflow);
 
         *new_sock = sf;
         sock_hold(sk);
         subflow->conn = sk;
+        mptcp_subflow_ops_override(sf->sk);
 
         return 0;
 }
 
 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
                                                         gfp_t priority)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct mptcp_subflow_context *ctx;
 
         ctx = kzalloc(sizeof(*ctx), priority);
         if (!ctx)
                 return NULL;
 
         rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
         INIT_LIST_HEAD(&ctx->node);
+        INIT_LIST_HEAD(&ctx->delegated_node);
 
         pr_debug("subflow=%p", ctx);
 
         ctx->tcp_sock = sk;
 
         return ctx;
 }
 
--- 16 unchanged lines hidden ---
 static void subflow_state_change(struct sock *sk)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
         struct sock *parent = subflow->conn;
 
         __subflow_state_change(sk);
 
         if (subflow_simultaneous_connect(sk)) {
+                mptcp_propagate_sndbuf(parent, sk);
                 mptcp_do_fallback(sk);
                 mptcp_rcv_space_init(mptcp_sk(parent), sk);
                 pr_fallback(mptcp_sk(parent));
                 subflow->conn_finished = 1;
                 if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                         inet_sk_state_store(parent, TCP_ESTABLISHED);
                         parent->sk_state_change(parent);
                 }
         }
 
         /* as recvmsg() does not acquire the subflow socket for ssk selection
          * a fin packet carrying a DSS can be unnoticed if we don't trigger
          * the data available machinery here.
          */
         if (mptcp_subflow_data_available(sk))
                 mptcp_data_ready(parent, sk);
 
+        subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+
         if (__mptcp_check_fallback(mptcp_sk(parent)) &&
             !subflow->rx_eof && subflow_is_done(sk)) {
                 subflow->rx_eof = 1;
                 mptcp_subflow_eof(parent);
         }
 }
 
 static int subflow_ulp_init(struct sock *sk)
--- 20 unchanged lines hidden ---
         pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
 
         tp->is_mptcp = 1;
         ctx->icsk_af_ops = icsk->icsk_af_ops;
         icsk->icsk_af_ops = subflow_default_af_ops(sk);
         ctx->tcp_data_ready = sk->sk_data_ready;
         ctx->tcp_state_change = sk->sk_state_change;
         ctx->tcp_write_space = sk->sk_write_space;
-        ctx->tcp_error_report = sk->sk_error_report;
         sk->sk_data_ready = subflow_data_ready;
         sk->sk_write_space = subflow_write_space;
         sk->sk_state_change = subflow_state_change;
-        sk->sk_error_report = subflow_error_report;
 out:
         return err;
 }
 
 static void subflow_ulp_release(struct sock *ssk)
 {
         struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
         bool release = true;
--- 7 unchanged lines hidden ---
                 /* if the msk has been orphaned, keep the ctx
                  * alive, will be freed by __mptcp_close_ssk(),
                  * when the subflow is still unaccepted
                  */
                 release = ctx->disposable || list_empty(&ctx->node);
                 sock_put(sk);
         }
 
+        mptcp_subflow_ops_undo_override(ssk);
         if (release)
                 kfree_rcu(ctx, rcu);
 }
 
 static void subflow_ulp_clone(const struct request_sock *req,
                               struct sock *newsk,
                               const gfp_t priority)
 {
--- 13 unchanged lines hidden ---
                 return;
         }
 
         new_ctx->conn_finished = 1;
         new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
         new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
         new_ctx->tcp_state_change = old_ctx->tcp_state_change;
         new_ctx->tcp_write_space = old_ctx->tcp_write_space;
-        new_ctx->tcp_error_report = old_ctx->tcp_error_report;
         new_ctx->rel_write_seq = 1;
         new_ctx->tcp_sock = newsk;
 
         if (subflow_req->mp_capable) {
                 /* see comments in subflow_syn_recv_sock(), MPTCP connection
                  * is fully established only after we receive the remote key
                  */
                 new_ctx->mp_capable = 1;
--- 8 unchanged lines hidden ---
                 new_ctx->backup = subflow_req->backup;
                 new_ctx->local_id = subflow_req->local_id;
                 new_ctx->remote_id = subflow_req->remote_id;
                 new_ctx->token = subflow_req->token;
                 new_ctx->thmac = subflow_req->thmac;
         }
 }
 
+static void tcp_release_cb_override(struct sock *ssk)
+{
+        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+        if (mptcp_subflow_has_delegated_action(subflow))
+                mptcp_subflow_process_delegated(ssk);
+
+        tcp_release_cb(ssk);
+}
+
 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
         .name = "mptcp",
         .owner = THIS_MODULE,
         .init = subflow_ulp_init,
         .release = subflow_ulp_release,
         .clone = subflow_ulp_clone,
 };
 
--- 24 unchanged lines hidden ---
         subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
         subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
 
         subflow_specific = ipv4_specific;
         subflow_specific.conn_request = subflow_v4_conn_request;
         subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
         subflow_specific.sk_rx_dst_set = subflow_finish_connect;
 
+        tcp_prot_override = tcp_prot;
+        tcp_prot_override.release_cb = tcp_release_cb_override;
+
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
         subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
         subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
 
         subflow_v6_specific = ipv6_specific;
         subflow_v6_specific.conn_request = subflow_v6_conn_request;
         subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
         subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
 
         subflow_v6m_specific = subflow_v6_specific;
         subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
         subflow_v6m_specific.send_check = ipv4_specific.send_check;
         subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
         subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
         subflow_v6m_specific.net_frag_header_len = 0;
+
+        tcpv6_prot_override = tcpv6_prot;
+        tcpv6_prot_override.release_cb = tcp_release_cb_override;
 #endif
 
         mptcp_diag_subflow_init(&subflow_ulp_ops);
 
         if (tcp_register_ulp(&subflow_ulp_ops) != 0)
                 panic("MPTCP: failed to register subflows to ULP\n");
 }