/* SPDX-License-Identifier: GPL-2.0 */
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __MPTCP_PROTOCOL_H
#define __MPTCP_PROTOCOL_H

#include <linux/random.h>
#include <net/tcp.h>
#include <net/inet_connection_sock.h>

#define MPTCP_SUPPORTED_VERSION	1

/* MPTCP option bits */
#define OPTION_MPTCP_MPC_SYN	BIT(0)
#define OPTION_MPTCP_MPC_SYNACK	BIT(1)
#define OPTION_MPTCP_MPC_ACK	BIT(2)
#define OPTION_MPTCP_MPJ_SYN	BIT(3)
#define OPTION_MPTCP_MPJ_SYNACK	BIT(4)
#define OPTION_MPTCP_MPJ_ACK	BIT(5)
#define OPTION_MPTCP_ADD_ADDR	BIT(6)
#define OPTION_MPTCP_ADD_ADDR6	BIT(7)
#define OPTION_MPTCP_RM_ADDR	BIT(8)

/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE	0
#define MPTCPOPT_MP_JOIN	1
#define MPTCPOPT_DSS		2
#define MPTCPOPT_ADD_ADDR	3
#define MPTCPOPT_RM_ADDR	4
#define MPTCPOPT_MP_PRIO	5
#define MPTCPOPT_MP_FAIL	6
#define MPTCPOPT_MP_FASTCLOSE	7

/* MPTCP suboption lengths */
#define TCPOLEN_MPTCP_MPC_SYN		4
#define TCPOLEN_MPTCP_MPC_SYNACK	12
#define TCPOLEN_MPTCP_MPC_ACK		20
#define TCPOLEN_MPTCP_MPC_ACK_DATA	22
#define TCPOLEN_MPTCP_MPJ_SYN		12
#define TCPOLEN_MPTCP_MPJ_SYNACK	16
#define TCPOLEN_MPTCP_MPJ_ACK		24
#define TCPOLEN_MPTCP_DSS_BASE		4
#define TCPOLEN_MPTCP_DSS_ACK32		4
#define TCPOLEN_MPTCP_DSS_ACK64		8
#define TCPOLEN_MPTCP_DSS_MAP32		10
#define TCPOLEN_MPTCP_DSS_MAP64		14
#define TCPOLEN_MPTCP_DSS_CHECKSUM	2
#define TCPOLEN_MPTCP_ADD_ADDR		16
#define TCPOLEN_MPTCP_ADD_ADDR_PORT	18
#define TCPOLEN_MPTCP_ADD_ADDR_BASE	8
#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT	10
#define TCPOLEN_MPTCP_ADD_ADDR6		28
#define TCPOLEN_MPTCP_ADD_ADDR6_PORT	30
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE	20
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT	22
#define TCPOLEN_MPTCP_PORT_LEN		2
#define TCPOLEN_MPTCP_RM_ADDR_BASE	4

/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP		BIT(0)
#define MPTCPOPT_HMAC_LEN	20
#define MPTCPOPT_THMAC_LEN	8

/* MPTCP MP_CAPABLE flags */
#define MPTCP_VERSION_MASK	(0x0F)
#define MPTCP_CAP_CHECKSUM_REQD	BIT(7)
#define MPTCP_CAP_EXTENSIBILITY	BIT(6)
#define MPTCP_CAP_HMAC_SHA256	BIT(0)
#define MPTCP_CAP_FLAG_MASK	(0x3F)

/* MPTCP DSS flags */
#define MPTCP_DSS_DATA_FIN	BIT(4)
#define MPTCP_DSS_DSN64		BIT(3)
#define MPTCP_DSS_HAS_MAP	BIT(2)
#define MPTCP_DSS_ACK64		BIT(1)
#define MPTCP_DSS_HAS_ACK	BIT(0)
#define MPTCP_DSS_FLAG_MASK	(0x1F)

/* MPTCP ADD_ADDR flags */
#define MPTCP_ADDR_ECHO		BIT(0)
#define MPTCP_ADDR_IPVERSION_4	4
#define MPTCP_ADDR_IPVERSION_6	6

/* MPTCP socket flags */
#define MPTCP_DATA_READY	0
#define MPTCP_NOSPACE		1
#define MPTCP_WORK_RTX		2
#define MPTCP_WORK_EOF		3
#define MPTCP_FALLBACK_DONE	4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
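/* Wrap-safe ordering of 64-bit MPTCP data sequence numbers, modeled on the
 * before()/after() helpers used for 32-bit TCP sequence numbers: the signed
 * difference is negative exactly when seq1 precedes seq2, even across a u64
 * wrap-around, e.g. before64(~0ULL, 0) is true.
 */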
static inline bool before64(__u64 seq1, __u64 seq2)
{
	return (__s64)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)

struct mptcp_options_received {
	u64	sndr_key;
	u64	rcvr_key;
	u64	data_ack;
	u64	data_seq;
	u32	subflow_seq;
	u16	data_len;
	u16	mp_capable : 1,
		mp_join : 1,
		dss : 1,
		add_addr : 1,
		rm_addr : 1,
		family : 4,
		echo : 1,
		backup : 1;
	u32	token;
	u32	nonce;
	u64	thmac;
	u8	hmac[20];
	u8	join_id;
	u8	use_map:1,
		dsn64:1,
		data_fin:1,
		use_ack:1,
		ack64:1,
		mpc_map:1,
		__unused:2;
	u8	addr_id;
	u8	rm_id;
	union {
		struct in_addr	addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr	addr6;
#endif
	};
	u64	ahmac;
	u16	port;
};
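/* Build the leading 32-bit word of an MPTCP option in network byte order:
 * the TCP option kind (TCPOPT_MPTCP), the total option length, the 4-bit
 * MPTCP subtype, a subtype-specific nibble and one more subtype-specific
 * byte.
 */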
static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
{
	return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
		     ((nib & 0xF) << 8) | field);
}

struct mptcp_addr_info {
	sa_family_t	family;
	__be16		port;
	u8		id;
	u8		flags;
	int		ifindex;
	union {
		struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr addr6;
#endif
	};
};

enum mptcp_pm_status {
	MPTCP_PM_ADD_ADDR_RECEIVED,
	MPTCP_PM_ADD_ADDR_SEND_ACK,
	MPTCP_PM_RM_ADDR_RECEIVED,
	MPTCP_PM_ESTABLISHED,
	MPTCP_PM_SUBFLOW_ESTABLISHED,
};

enum mptcp_add_addr_status {
	MPTCP_ADD_ADDR_SIGNAL,
	MPTCP_ADD_ADDR_ECHO,
	MPTCP_ADD_ADDR_IPV6,
};

struct mptcp_pm_data {
	struct mptcp_addr_info local;
	struct mptcp_addr_info remote;
	struct list_head anno_list;

	spinlock_t	lock;		/* protects the whole PM data */

	u8		add_addr_signal;
	bool		rm_addr_signal;
	bool		server_side;
	bool		work_pending;
	bool		accept_addr;
	bool		accept_subflow;
	u8		add_addr_signaled;
	u8		add_addr_accepted;
	u8		local_addr_used;
	u8		subflows;
	u8		add_addr_signal_max;
	u8		add_addr_accept_max;
	u8		local_addr_max;
	u8		subflows_max;
	u8		status;
	u8		rm_id;
};

struct mptcp_data_frag {
	struct list_head list;
	u64 data_seq;
	u16 data_len;
	u16 offset;
	u16 overhead;
	u16 already_sent;
	struct page *page;
};

/* MPTCP connection sock */
struct mptcp_sock {
	/* inet_connection_sock must be the first member */
	struct inet_connection_sock sk;
	u64		local_key;
	u64		remote_key;
	u64		write_seq;
	u64		snd_nxt;
	u64		ack_seq;
	u64		rcv_wnd_sent;
	u64		rcv_data_fin_seq;
	struct sock	*last_snd;
	int		snd_burst;
	int		old_wspace;
	atomic64_t	snd_una;
	atomic64_t	wnd_end;
	unsigned long	timer_ival;
	u32		token;
	int		rmem_pending;
	unsigned long	flags;
	bool		can_ack;
	bool		fully_established;
	bool		rcv_data_fin;
	bool		snd_data_fin_enable;
	bool		use_64bit_ack; /* Set when we received a 64-bit DSN */
	spinlock_t	join_list_lock;
	struct sock	*ack_hint;
	struct work_struct work;
	struct sk_buff	*ooo_last_skb;
	struct rb_root	out_of_order_queue;
	struct list_head conn_list;
	struct list_head rtx_queue;
	struct mptcp_data_frag *first_pending;
	struct list_head join_list;
	struct skb_ext	*cached_ext;	/* for the next sendmsg */
	struct socket	*subflow; /* outgoing connect/listener/!mp_capable */
	struct sock	*first;
	struct mptcp_pm_data	pm;
	struct {
		u32	space;	/* bytes copied in last measurement window */
		u32	copied; /* bytes copied in this measurement window */
		u64	time;	/* start time of measurement window */
		u64	rtt_us; /* last maximum rtt of subflows */
	} rcvq_space;
};

#define mptcp_for_each_subflow(__msk, __subflow)			\
	list_for_each_entry(__subflow, &((__msk)->conn_list), node)

static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
{
	return (struct mptcp_sock *)sk;
}

static inline int __mptcp_space(const struct sock *sk)
{
	return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_pending);
}

static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
{
	const struct mptcp_sock *msk = mptcp_sk(sk);

	return READ_ONCE(msk->first_pending);
}

static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *cur;

	cur = msk->first_pending;
	return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
						     list_next_entry(cur, list);
}

static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->first_pending)
		return NULL;

	if (WARN_ON_ONCE(list_empty(&msk->rtx_queue)))
		return NULL;

	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
}

static inline struct mptcp_data_frag *mptcp_rtx_tail(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!before64(msk->snd_nxt, atomic64_read(&msk->snd_una)))
		return NULL;

	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
}

static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}

struct mptcp_subflow_request_sock {
	struct	tcp_request_sock sk;
	u16	mp_capable : 1,
		mp_join : 1,
		backup : 1;
	u8	local_id;
	u8	remote_id;
	u64	local_key;
	u64	idsn;
	u32	token;
	u32	ssn_offset;
	u64	thmac;
	u32	local_nonce;
	u32	remote_nonce;
	struct mptcp_sock	*msk;
	struct hlist_nulls_node token_node;
};

static inline struct mptcp_subflow_request_sock *
mptcp_subflow_rsk(const struct request_sock *rsk)
{
	return (struct mptcp_subflow_request_sock *)rsk;
}

enum mptcp_data_avail {
	MPTCP_SUBFLOW_NODATA,
	MPTCP_SUBFLOW_DATA_AVAIL,
	MPTCP_SUBFLOW_OOO_DATA
};

/* MPTCP subflow context */
struct mptcp_subflow_context {
	struct	list_head node;	/* conn_list of subflows */
	u64	local_key;
	u64	remote_key;
	u64	idsn;
	u64	map_seq;
	u32	snd_isn;
	u32	token;
	u32	rel_write_seq;
	u32	map_subflow_seq;
	u32	ssn_offset;
	u32	map_data_len;
	u32	request_mptcp : 1,  /* send MP_CAPABLE */
		request_join : 1,   /* send MP_JOIN */
		request_bkup : 1,
		mp_capable : 1,	    /* remote is MPTCP capable */
		mp_join : 1,	    /* remote is JOINing */
		fully_established : 1,	    /* path validated */
		pm_notified : 1,    /* PM hook called for established status */
		conn_finished : 1,
		map_valid : 1,
		mpc_map : 1,
		backup : 1,
		rx_eof : 1,
		can_ack : 1,	    /* only after processing the remote key */
		disposable : 1;	    /* ctx can be freed at ulp release time */
	enum mptcp_data_avail data_avail;
	u32	remote_nonce;
	u64	thmac;
	u32	local_nonce;
	u32	remote_token;
	u8	hmac[MPTCPOPT_HMAC_LEN];
	u8	local_id;
	u8	remote_id;

	struct	sock *tcp_sock;	    /* tcp sk backpointer */
	struct	sock *conn;	    /* parent mptcp_sock */
	const	struct inet_connection_sock_af_ops *icsk_af_ops;
	void	(*tcp_data_ready)(struct sock *sk);
	void	(*tcp_state_change)(struct sock *sk);
	void	(*tcp_write_space)(struct sock *sk);

	struct	rcu_head rcu;
};

static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code */
	return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
}

static inline struct sock *
mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
{
	return subflow->tcp_sock;
}
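/* Offset of the next byte to read inside the current DSS mapping:
 * copied_seq is the subflow (TCP) sequence already consumed by the receiver,
 * ssn_offset rebases it to a relative subflow sequence number, and
 * map_subflow_seq is where the mapping starts.  Adding this offset to
 * map_seq (below) yields the 64-bit data sequence number of the next byte.
 */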
static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
	return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq -
		      subflow->ssn_offset -
		      subflow->map_subflow_seq;
}

static inline u64
mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
{
	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}

static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
					     struct mptcp_subflow_context *subflow)
{
	sock_hold(mptcp_subflow_tcp_sock(subflow));
	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
}

int mptcp_is_enabled(struct net *net);
unsigned int mptcp_get_add_addr_timeout(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		       struct mptcp_subflow_context *subflow);
void mptcp_subflow_reset(struct sock *ssk);

/* called with sk socket lock held */
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);

static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
					      struct mptcp_subflow_context *ctx)
{
	sk->sk_data_ready = ctx->tcp_data_ready;
	sk->sk_state_change = ctx->tcp_state_change;
	sk->sk_write_space = ctx->tcp_write_space;

	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
}

void __init mptcp_proto_init(void);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int __init mptcp_proto_v6_init(void);
#endif

struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt);

void mptcp_finish_connect(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
	       READ_ONCE(mptcp_sk(sk)->fully_established);
}
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
bool mptcp_schedule_work(struct sock *sk);
void mptcp_data_acked(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
void __mptcp_flush_join_list(struct mptcp_sock *msk);
static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->snd_data_fin_enable) &&
	       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}

void mptcp_destroy_common(struct mptcp_sock *msk);

void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
{
	mptcp_subflow_rsk(req)->token_node.pprev = NULL;
}

int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(struct request_sock *req);
int mptcp_token_new_connect(struct sock *sk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
			struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
struct mptcp_sock *mptcp_token_get_sock(u32 token);
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
					 long *s_num);
void mptcp_token_destroy(struct mptcp_sock *msk);

void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);

void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);

void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side);
void mptcp_pm_fully_established(struct mptcp_sock *msk);
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
void mptcp_pm_connection_closed(struct mptcp_sock *msk);
void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow);
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       struct mptcp_addr_info *addr);

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id);
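/* pm.add_addr_signal is a bitmask of MPTCP_ADD_ADDR_* bits set by the path
 * manager; the helpers below read it locklessly with READ_ONCE() so the
 * option-writing code can check for pending ADD_ADDR signalling.
 * rm_addr_signal is tracked separately as a plain bool.
 */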
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
}

static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO);
}

static inline bool mptcp_pm_should_add_signal_ipv6(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_IPV6);
}

static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.rm_addr_signal);
}
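/* ADD_ADDR option length depends on the address family and on whether the
 * option is an echo: the *_BASE (echo) variants carry no truncated HMAC,
 * the non-echo forms are 8 bytes longer to hold it, and the IPv6 forms are
 * 12 bytes longer for the larger address (see TCPOLEN_MPTCP_ADD_ADDR*).
 */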
static inline unsigned int mptcp_add_addr_len(int family, bool echo)
{
	if (family == AF_INET)
		return echo ? TCPOLEN_MPTCP_ADD_ADDR_BASE
			    : TCPOLEN_MPTCP_ADD_ADDR;
	return echo ? TCPOLEN_MPTCP_ADD_ADDR6_BASE : TCPOLEN_MPTCP_ADD_ADDR6;
}

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo);
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);

void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk);
void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);

static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
{
	return (struct mptcp_ext *)skb_ext_find(skb, SKB_EXT_MPTCP);
}

void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);

static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
	return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline bool mptcp_check_fallback(const struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	return __mptcp_check_fallback(msk);
}

static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
		pr_debug("TCP fallback already done (msk=%p)", msk);
		return;
	}
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline void mptcp_do_fallback(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	__mptcp_do_fallback(msk);
}

#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)

static inline bool subflow_simultaneous_connect(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	return sk->sk_state == TCP_ESTABLISHED &&
	       !mptcp_sk(parent)->pm.server_side &&
	       !subflow->conn_finished;
}

#ifdef CONFIG_SYN_COOKIES
void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				       struct sk_buff *skb);
bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
					struct sk_buff *skb);
void __init mptcp_join_cookie_init(void);
#else
static inline void
subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				  struct sk_buff *skb) {}
static inline bool
mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
				   struct sk_buff *skb)
{
	return false;
}

static inline void mptcp_join_cookie_init(void) {}
#endif

#endif /* __MPTCP_PROTOCOL_H */