/* SPDX-License-Identifier: GPL-2.0 */
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __MPTCP_PROTOCOL_H
#define __MPTCP_PROTOCOL_H

#include <linux/random.h>
#include <net/tcp.h>
#include <net/inet_connection_sock.h>
#include <uapi/linux/mptcp.h>

#define MPTCP_SUPPORTED_VERSION	1

/* MPTCP option bits */
#define OPTION_MPTCP_MPC_SYN	BIT(0)
#define OPTION_MPTCP_MPC_SYNACK	BIT(1)
#define OPTION_MPTCP_MPC_ACK	BIT(2)
#define OPTION_MPTCP_MPJ_SYN	BIT(3)
#define OPTION_MPTCP_MPJ_SYNACK	BIT(4)
#define OPTION_MPTCP_MPJ_ACK	BIT(5)
#define OPTION_MPTCP_ADD_ADDR	BIT(6)
#define OPTION_MPTCP_ADD_ADDR6	BIT(7)
#define OPTION_MPTCP_RM_ADDR	BIT(8)
#define OPTION_MPTCP_FASTCLOSE	BIT(9)
#define OPTION_MPTCP_PRIO	BIT(10)

/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE	0
#define MPTCPOPT_MP_JOIN	1
#define MPTCPOPT_DSS		2
#define MPTCPOPT_ADD_ADDR	3
#define MPTCPOPT_RM_ADDR	4
#define MPTCPOPT_MP_PRIO	5
#define MPTCPOPT_MP_FAIL	6
#define MPTCPOPT_MP_FASTCLOSE	7

/* MPTCP suboption lengths */
#define TCPOLEN_MPTCP_MPC_SYN		4
#define TCPOLEN_MPTCP_MPC_SYNACK	12
#define TCPOLEN_MPTCP_MPC_ACK		20
#define TCPOLEN_MPTCP_MPC_ACK_DATA	22
#define TCPOLEN_MPTCP_MPJ_SYN		12
#define TCPOLEN_MPTCP_MPJ_SYNACK	16
#define TCPOLEN_MPTCP_MPJ_ACK		24
#define TCPOLEN_MPTCP_DSS_BASE		4
#define TCPOLEN_MPTCP_DSS_ACK32		4
#define TCPOLEN_MPTCP_DSS_ACK64		8
#define TCPOLEN_MPTCP_DSS_MAP32		10
#define TCPOLEN_MPTCP_DSS_MAP64		14
#define TCPOLEN_MPTCP_DSS_CHECKSUM	2
#define TCPOLEN_MPTCP_ADD_ADDR		16
#define TCPOLEN_MPTCP_ADD_ADDR_PORT	20
#define TCPOLEN_MPTCP_ADD_ADDR_BASE	8
#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT	12
#define TCPOLEN_MPTCP_ADD_ADDR6		28
#define TCPOLEN_MPTCP_ADD_ADDR6_PORT	32
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE	20
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT	24
#define TCPOLEN_MPTCP_PORT_LEN		4
#define TCPOLEN_MPTCP_RM_ADDR_BASE	4
#define TCPOLEN_MPTCP_PRIO		3
#define TCPOLEN_MPTCP_PRIO_ALIGN	4
#define TCPOLEN_MPTCP_FASTCLOSE		12

/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP		BIT(0)
#define MPTCPOPT_HMAC_LEN	20
#define MPTCPOPT_THMAC_LEN	8

/* MPTCP MP_CAPABLE flags */
#define MPTCP_VERSION_MASK	(0x0F)
#define MPTCP_CAP_CHECKSUM_REQD	BIT(7)
#define MPTCP_CAP_EXTENSIBILITY	BIT(6)
#define MPTCP_CAP_HMAC_SHA256	BIT(0)
#define MPTCP_CAP_FLAG_MASK	(0x3F)

/* MPTCP DSS flags */
#define MPTCP_DSS_DATA_FIN	BIT(4)
#define MPTCP_DSS_DSN64		BIT(3)
#define MPTCP_DSS_HAS_MAP	BIT(2)
#define MPTCP_DSS_ACK64		BIT(1)
#define MPTCP_DSS_HAS_ACK	BIT(0)
#define MPTCP_DSS_FLAG_MASK	(0x1F)

/* MPTCP ADD_ADDR flags */
#define MPTCP_ADDR_ECHO		BIT(0)
#define MPTCP_ADDR_IPVERSION_4	4
#define MPTCP_ADDR_IPVERSION_6	6

/* MPTCP MP_PRIO flags */
#define MPTCP_PRIO_BKUP		BIT(0)

/* MPTCP socket flags */
#define MPTCP_DATA_READY	0
#define MPTCP_NOSPACE		1
#define MPTCP_WORK_RTX		2
#define MPTCP_WORK_EOF		3
#define MPTCP_FALLBACK_DONE	4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
#define MPTCP_PUSH_PENDING	6
#define MPTCP_CLEAN_UNA		7
#define MPTCP_ERROR_REPORT	8

static inline bool before64(__u64 seq1, __u64 seq2)
{
	return (__s64)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)
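/* As with TCP's before()/after(), the subtraction above is performed in
 * unsigned arithmetic and reinterpreted as signed, so ordering stays
 * correct across sequence-space wraparound. Illustrative examples
 * (values made up for this sketch):
 *
 *	before64(1, 2)			true: 1 precedes 2
 *	before64(~0ULL, 0)		true: U64_MAX sits "just before" 0
 *	after64(msk->snd_nxt, msk->snd_una)
 *					true while data is in flight
 */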
struct mptcp_options_received {
	u64	sndr_key;
	u64	rcvr_key;
	u64	data_ack;
	u64	data_seq;
	u32	subflow_seq;
	u16	data_len;
	u16	mp_capable : 1,
		mp_join : 1,
		fastclose : 1,
		dss : 1,
		add_addr : 1,
		rm_addr : 1,
		mp_prio : 1,
		family : 4,
		echo : 1,
		backup : 1;
	u32	token;
	u32	nonce;
	u64	thmac;
	u8	hmac[MPTCPOPT_HMAC_LEN];
	u8	join_id;
	u8	use_map:1,
		dsn64:1,
		data_fin:1,
		use_ack:1,
		ack64:1,
		mpc_map:1,
		__unused:2;
	u8	addr_id;
	u8	rm_id;
	union {
		struct in_addr	addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr	addr6;
#endif
	};
	u64	ahmac;
	u16	port;
};

static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
{
	return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
		     ((nib & 0xF) << 8) | field);
}
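/* Example: mptcp_option() builds the leading 32 bits shared by every
 * MPTCP option: kind (TCPOPT_MPTCP), length, subtype, a 4-bit nibble
 * (version or flags, depending on the suboption) and one flag byte.
 * A sketch for the first word of an MP_CAPABLE SYN, assuming SHA-256
 * HMAC and no checksum:
 *
 *	mptcp_option(MPTCPOPT_MP_CAPABLE, TCPOLEN_MPTCP_MPC_SYN,
 *		     MPTCP_SUPPORTED_VERSION, MPTCP_CAP_HMAC_SHA256)
 *
 * which is htonl(0x1e040101): kind 30, len 4, subtype 0, version 1,
 * flags 0x01.
 */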
struct mptcp_addr_info {
	sa_family_t	family;
	__be16		port;
	u8		id;
	u8		flags;
	int		ifindex;
	union {
		struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr addr6;
#endif
	};
};

enum mptcp_pm_status {
	MPTCP_PM_ADD_ADDR_RECEIVED,
	MPTCP_PM_ADD_ADDR_SEND_ACK,
	MPTCP_PM_RM_ADDR_RECEIVED,
	MPTCP_PM_ESTABLISHED,
	MPTCP_PM_ALREADY_ESTABLISHED,	/* persistent status, set after ESTABLISHED event */
	MPTCP_PM_SUBFLOW_ESTABLISHED,
};

enum mptcp_addr_signal_status {
	MPTCP_ADD_ADDR_SIGNAL,
	MPTCP_ADD_ADDR_ECHO,
	MPTCP_ADD_ADDR_IPV6,
	MPTCP_ADD_ADDR_PORT,
	MPTCP_RM_ADDR_SIGNAL,
};

struct mptcp_pm_data {
	struct mptcp_addr_info local;
	struct mptcp_addr_info remote;
	struct list_head anno_list;

	spinlock_t	lock;		/* protects the whole PM data */

	u8		addr_signal;
	bool		server_side;
	bool		work_pending;
	bool		accept_addr;
	bool		accept_subflow;
	u8		add_addr_signaled;
	u8		add_addr_accepted;
	u8		local_addr_used;
	u8		subflows;
	u8		status;
	u8		rm_id;
};

struct mptcp_data_frag {
	struct list_head list;
	u64 data_seq;
	u16 data_len;
	u16 offset;
	u16 overhead;
	u16 already_sent;
	struct page *page;
};

/* MPTCP connection sock */
struct mptcp_sock {
	/* inet_connection_sock must be the first member */
	struct inet_connection_sock sk;
	u64		local_key;
	u64		remote_key;
	u64		write_seq;
	u64		snd_nxt;
	u64		ack_seq;
	u64		rcv_wnd_sent;
	u64		rcv_data_fin_seq;
	int		wmem_reserved;
	struct sock	*last_snd;
	int		snd_burst;
	int		old_wspace;
	u64		snd_una;
	u64		wnd_end;
	unsigned long	timer_ival;
	u32		token;
	int		rmem_released;
	unsigned long	flags;
	bool		can_ack;
	bool		fully_established;
	bool		rcv_data_fin;
	bool		snd_data_fin_enable;
	bool		rcv_fastclose;
	bool		use_64bit_ack; /* Set when we received a 64-bit DSN */
	spinlock_t	join_list_lock;
	struct sock	*ack_hint;
	struct work_struct work;
	struct sk_buff	*ooo_last_skb;
	struct rb_root	out_of_order_queue;
	struct sk_buff_head receive_queue;
	struct sk_buff_head skb_tx_cache;	/* this is wmem accounted */
	int		tx_pending_data;
	int		size_goal_cache;
	struct list_head conn_list;
	struct list_head rtx_queue;
	struct mptcp_data_frag *first_pending;
	struct list_head join_list;
	struct socket	*subflow; /* outgoing connect/listener/!mp_capable */
	struct sock	*first;
	struct mptcp_pm_data	pm;
	struct {
		u32	space;	/* bytes copied in last measurement window */
		u32	copied; /* bytes copied in this measurement window */
		u64	time;	/* start time of measurement window */
		u64	rtt_us; /* last maximum rtt of subflows */
	} rcvq_space;
};

#define mptcp_lock_sock(___sk, cb) do {					\
	struct sock *__sk = (___sk); /* silence macro reuse warning */	\
	might_sleep();							\
	spin_lock_bh(&__sk->sk_lock.slock);				\
	if (__sk->sk_lock.owned)					\
		__lock_sock(__sk);					\
	cb;								\
	__sk->sk_lock.owned = 1;					\
	spin_unlock(&__sk->sk_lock.slock);				\
	mutex_acquire(&__sk->sk_lock.dep_map, 0, 0, _RET_IP_);		\
	local_bh_enable();						\
} while (0)

#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
#define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)

#define mptcp_for_each_subflow(__msk, __subflow)			\
	list_for_each_entry(__subflow, &((__msk)->conn_list), node)

static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);
}

static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
{
	return (struct mptcp_sock *)sk;
}

static inline int __mptcp_space(const struct sock *sk)
{
	return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released);
}

static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
{
	const struct mptcp_sock *msk = mptcp_sk(sk);

	return READ_ONCE(msk->first_pending);
}

static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *cur;

	cur = msk->first_pending;
	return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
						     list_next_entry(cur, list);
}

static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->first_pending)
		return NULL;

	if (WARN_ON_ONCE(list_empty(&msk->rtx_queue)))
		return NULL;

	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
}

static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->snd_una == READ_ONCE(msk->snd_nxt))
		return NULL;

	return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}
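/* The helpers above encode the write queue invariants: rtx_queue holds
 * every not-yet-acked mptcp_data_frag, and first_pending points at the
 * first frag still to be transmitted, so the unsent tail of rtx_queue
 * is the pending queue. A minimal sender loop sketch built on top of
 * them (hypothetical caller, msk socket lock held):
 *
 *	while ((dfrag = mptcp_send_head(sk))) {
 *		... transmit bytes [dfrag->already_sent, dfrag->data_len)
 *		    on a subflow ...
 *		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 *	}
 *
 * mptcp_rtx_head() instead returns the oldest unacked frag, and only
 * when snd_una != snd_nxt, i.e. when data is actually in flight.
 */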
struct mptcp_subflow_request_sock {
	struct	tcp_request_sock sk;
	u16	mp_capable : 1,
		mp_join : 1,
		backup : 1;
	u8	local_id;
	u8	remote_id;
	u64	local_key;
	u64	idsn;
	u32	token;
	u32	ssn_offset;
	u64	thmac;
	u32	local_nonce;
	u32	remote_nonce;
	struct mptcp_sock	*msk;
	struct hlist_nulls_node token_node;
};

static inline struct mptcp_subflow_request_sock *
mptcp_subflow_rsk(const struct request_sock *rsk)
{
	return (struct mptcp_subflow_request_sock *)rsk;
}

enum mptcp_data_avail {
	MPTCP_SUBFLOW_NODATA,
	MPTCP_SUBFLOW_DATA_AVAIL,
	MPTCP_SUBFLOW_OOO_DATA
};

struct mptcp_delegated_action {
	struct napi_struct napi;
	struct list_head head;
};

DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);

#define MPTCP_DELEGATE_SEND	0

/* MPTCP subflow context */
struct mptcp_subflow_context {
	struct	list_head node;	/* conn_list of subflows */
	u64	local_key;
	u64	remote_key;
	u64	idsn;
	u64	map_seq;
	u32	snd_isn;
	u32	token;
	u32	rel_write_seq;
	u32	map_subflow_seq;
	u32	ssn_offset;
	u32	map_data_len;
	u32	request_mptcp : 1,  /* send MP_CAPABLE */
		request_join : 1,   /* send MP_JOIN */
		request_bkup : 1,
		mp_capable : 1,	    /* remote is MPTCP capable */
		mp_join : 1,	    /* remote is JOINing */
		fully_established : 1,	    /* path validated */
		pm_notified : 1,    /* PM hook called for established status */
		conn_finished : 1,
		map_valid : 1,
		mpc_map : 1,
		backup : 1,
		send_mp_prio : 1,
		rx_eof : 1,
		can_ack : 1,	    /* only after processing the remote key */
		disposable : 1;	    /* ctx can be freed at ulp release time */
	enum mptcp_data_avail data_avail;
	u32	remote_nonce;
	u64	thmac;
	u32	local_nonce;
	u32	remote_token;
	u8	hmac[MPTCPOPT_HMAC_LEN];
	u8	local_id;
	u8	remote_id;

	long	delegated_status;
	struct	list_head delegated_node;   /* link into delegated_action, protected by local BH */

	struct	sock *tcp_sock;	    /* tcp sk backpointer */
	struct	sock *conn;	    /* parent mptcp_sock */
	const	struct inet_connection_sock_af_ops *icsk_af_ops;
	void	(*tcp_data_ready)(struct sock *sk);
	void	(*tcp_state_change)(struct sock *sk);
	void	(*tcp_write_space)(struct sock *sk);
	void	(*tcp_error_report)(struct sock *sk);

	struct	rcu_head rcu;
};

static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code */
	return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
}

static inline struct sock *
mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
{
	return subflow->tcp_sock;
}

static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
	return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq -
		      subflow->ssn_offset -
		      subflow->map_subflow_seq;
}

static inline u64
mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
{
	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}

static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
					     struct mptcp_subflow_context *subflow)
{
	sock_hold(mptcp_subflow_tcp_sock(subflow));
	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
}

void mptcp_subflow_process_delegated(struct sock *ssk);

static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
{
	struct mptcp_delegated_action *delegated;
	bool schedule;

	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
	 * ensures the below list check sees list updates done prior to status
	 * bit changes
	 */
	if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
		/* still on delegated list from previous scheduling */
		if (!list_empty(&subflow->delegated_node))
			return;

		/* the caller held the subflow bh socket lock */
		lockdep_assert_in_softirq();

		delegated = this_cpu_ptr(&mptcp_delegated_actions);
		schedule = list_empty(&delegated->head);
		list_add_tail(&subflow->delegated_node, &delegated->head);
		sock_hold(mptcp_subflow_tcp_sock(subflow));
		if (schedule)
			napi_schedule(&delegated->napi);
	}
}
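/* A possible producer-side usage (illustrative only): a path that must
 * act on the subflow but cannot do so in the current context can defer
 * the work to NAPI:
 *
 *	mptcp_subflow_delegate(mptcp_subflow_ctx(ssk));
 *
 * The subflow is queued on this CPU's delegated list, an extra ssk
 * reference is taken, and the NAPI instance is scheduled only when the
 * list transitions from empty to non-empty.
 */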
static inline struct mptcp_subflow_context *
mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
{
	struct mptcp_subflow_context *ret;

	if (list_empty(&delegated->head))
		return NULL;

	ret = list_first_entry(&delegated->head, struct mptcp_subflow_context, delegated_node);
	list_del_init(&ret->delegated_node);
	return ret;
}

static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
	return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
{
	/* pairs with mptcp_subflow_delegate, ensures delegated_node is
	 * updated before touching the status bit
	 */
	smp_wmb();
	clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}
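/* Consumer-side sketch, modelled on the expected per-CPU NAPI poll loop
 * (simplified, budget handling omitted):
 *
 *	while ((subflow = mptcp_subflow_delegated_next(delegated))) {
 *		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 *
 *		bh_lock_sock_nested(ssk);
 *		if (!sock_owned_by_user(ssk) &&
 *		    mptcp_subflow_has_delegated_action(subflow))
 *			mptcp_subflow_process_delegated(ssk);
 *		bh_unlock_sock(ssk);
 *		sock_put(ssk);
 *	}
 *
 * mptcp_subflow_process_delegated() is expected to clear the status bit
 * via mptcp_subflow_delegated_done() once the action has been handled.
 */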
int mptcp_is_enabled(struct net *net);
unsigned int mptcp_get_add_addr_timeout(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);

/* called with sk socket lock held */
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family);

static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
					      struct mptcp_subflow_context *ctx)
{
	sk->sk_data_ready = ctx->tcp_data_ready;
	sk->sk_state_change = ctx->tcp_state_change;
	sk->sk_write_space = ctx->tcp_write_space;
	sk->sk_error_report = ctx->tcp_error_report;

	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
}

void __init mptcp_proto_init(void);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int __init mptcp_proto_v6_init(void);
#endif

struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt);

void mptcp_finish_connect(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
	       READ_ONCE(mptcp_sk(sk)->fully_established);
}
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
bool mptcp_schedule_work(struct sock *sk);
void __mptcp_check_push(struct sock *sk, struct sock *ssk);
void __mptcp_data_acked(struct sock *sk);
void __mptcp_error_report(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
void __mptcp_flush_join_list(struct mptcp_sock *msk);
static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->snd_data_fin_enable) &&
	       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}

static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
{
	if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
		return false;

	WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
	return true;
}

static inline void mptcp_write_space(struct sock *sk)
{
	if (sk_stream_is_writeable(sk)) {
		/* pairs with memory barrier in mptcp_poll */
		smp_mb();
		if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
			sk_stream_write_space(sk);
	}
}

void mptcp_destroy_common(struct mptcp_sock *msk);

void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
{
	mptcp_subflow_rsk(req)->token_node.pprev = NULL;
}

int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(struct request_sock *req);
int mptcp_token_new_connect(struct sock *sk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
			struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
struct mptcp_sock *mptcp_token_get_sock(u32 token);
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
					 long *s_num);
void mptcp_token_destroy(struct mptcp_sock *msk);

void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);

void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);

void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp);
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
void mptcp_pm_connection_closed(struct mptcp_sock *msk);
void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow);
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id);
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr,
				 u8 bkup);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       struct mptcp_addr_info *addr);

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id);

void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
		 const struct sock *ssk, gfp_t gfp);
void mptcp_event_addr_announced(const struct mptcp_sock *msk, const struct mptcp_addr_info *info);
void mptcp_event_addr_removed(const struct mptcp_sock *msk, u8 id);

static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
}

static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO);
}

static inline bool mptcp_pm_should_add_signal_ipv6(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_IPV6);
}

static inline bool mptcp_pm_should_add_signal_port(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_PORT);
}

static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
}
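/* pm.addr_signal is a bitmask indexed by enum mptcp_addr_signal_status:
 * the PM sets bits while holding msk->pm.lock and the option writer
 * reads them locklessly through the READ_ONCE() helpers above. A
 * hypothetical, simplified signalling-side sketch:
 *
 *	spin_lock_bh(&msk->pm.lock);
 *	msk->pm.local = *addr;
 *	WRITE_ONCE(msk->pm.addr_signal, BIT(MPTCP_ADD_ADDR_SIGNAL));
 *	spin_unlock_bh(&msk->pm.lock);
 */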
static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port)
{
	u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;

	if (family == AF_INET6)
		len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
	if (!echo)
		len += MPTCPOPT_THMAC_LEN;
	if (port)
		len += TCPOLEN_MPTCP_PORT_LEN;

	return len;
}
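/* Worked example: an IPv4 ADD_ADDR carrying both an HMAC (!echo) and a
 * port is TCPOLEN_MPTCP_ADD_ADDR_BASE (8) + MPTCPOPT_THMAC_LEN (8) +
 * TCPOLEN_MPTCP_PORT_LEN (4) = 20 bytes, i.e.
 * TCPOLEN_MPTCP_ADD_ADDR_PORT; the IPv6 equivalent is 20 + 8 + 4 = 32,
 * i.e. TCPOLEN_MPTCP_ADD_ADDR6_PORT, matching the suboption lengths
 * defined at the top of this file.
 */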
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo, bool *port);
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);

void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);

static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
{
	return (struct mptcp_ext *)skb_ext_find(skb, SKB_EXT_MPTCP);
}

void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);

static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
	return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline bool mptcp_check_fallback(const struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	return __mptcp_check_fallback(msk);
}

static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
		pr_debug("TCP fallback already done (msk=%p)", msk);
		return;
	}
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline void mptcp_do_fallback(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	__mptcp_do_fallback(msk);
}

#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)

static inline bool subflow_simultaneous_connect(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	return sk->sk_state == TCP_ESTABLISHED &&
	       !mptcp_sk(parent)->pm.server_side &&
	       !subflow->conn_finished;
}

#ifdef CONFIG_SYN_COOKIES
void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				       struct sk_buff *skb);
bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
					struct sk_buff *skb);
void __init mptcp_join_cookie_init(void);
#else
static inline void
subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				  struct sk_buff *skb) {}
static inline bool
mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
				   struct sk_buff *skb)
{
	return false;
}

static inline void mptcp_join_cookie_init(void) {}
#endif

#endif /* __MPTCP_PROTOCOL_H */