/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale value according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
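/* Illustrative note (not in the original header): the _ALIGNED lengths
 * account for the NOP padding stacks actually emit, keeping the option
 * list 32bit aligned. For example, the timestamp option is normally sent
 * as
 *
 *	TCPOPT_NOP (1) + TCPOPT_NOP (1) + TCPOPT_TIMESTAMP (TCPOLEN_TIMESTAMP = 10)
 *
 * for a total of 12 bytes, i.e. TCPOLEN_TSTAMP_ALIGNED.
 */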
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
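/* Illustrative example (not in the original header): these helpers use
 * modular 32bit arithmetic, so they keep working across sequence number
 * wraparound:
 *
 *	before(0xfffffff0, 0x00000010) is true,
 *		because (__s32)(0xfffffff0 - 0x00000010) == -0x20 < 0
 *	between(0x00000005, 0xfffffff0, 0x00000010) is true,
 *		because 0x10 - 0xfffffff0 == 0x20 >= 0x05 - 0xfffffff0 == 0x15
 */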
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);
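/* Worked example (illustrative): with sysctl_tcp_max_orphans == 65536 and
 * shift == 1, the cheap approximate percpu read only triggers the expensive
 * precise percpu_counter_sum_positive() once the estimate exceeds 32768
 * orphans; the precise count then makes the final decision. This keeps the
 * common, uncontended case O(1).
 */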
extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (time_after32(now, last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	if (time_after32(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}
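/* Illustrative sketch (not in the original header): the overflow stamp is
 * refreshed at most about once per second, so a SYN flood does not dirty
 * the cache line on every SYN:
 *
 *	last_overflow == 5000, now == 5000 + HZ/2   -> no store yet
 *	last_overflow == 5000, now == 5000 + HZ + 1 -> store "now"
 */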
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return time_after32(now, last_overflow +
					    TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
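/* Worked example (illustrative): with rcv_wup == 1000, rcv_wnd == 500 and
 * rcv_nxt == 1300, there are 200 bytes of advertised window left:
 *
 *	win = 1000 + 500 - 1300 = 200
 *
 * If the peer pushed past the offered window (say rcv_nxt == 1600), win
 * would be negative and is clamped to 0.
 */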
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
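/* Illustrative conversions (not in the original header), assuming
 * tcp_clock_ns() currently returns 2,000,000,000 (2 sec since boot):
 *
 *	tcp_clock_us()       == 2,000,000	(microseconds)
 *	tcp_time_stamp_raw() == 2,000		(1 ms ticks, TCP_TS_HZ == 1000)
 *
 * tcp_time_stamp(tp) yields the same 1 ms resolution, but derives it from
 * the cached tp->tcp_mstamp (in us) instead of reading the clock again.
 */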
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
		struct {
			__u32 flags;
			struct sock *sk_redir;
			void *data_end;
		} bpf;
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
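/* Usage sketch (illustrative): the control block overlays skb->cb[], so a
 * segment's host-order sequence range can be inspected as
 *
 *	u32 start = TCP_SKB_CB(skb)->seq;
 *	u32 end   = TCP_SKB_CB(skb)->end_seq;
 *	u32 len   = end - start;	(counts SYN/FIN as one "byte" each)
 */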
static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
}

static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.sk_redir;
}

static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}
#endif

static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};
/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
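/* Sketch (illustrative, under the semantics above): a cong_control
 * implementation can derive a delivery-rate sample like
 *
 *	if (rs->delivered > 0 && rs->interval_us > 0)
 *		rate_pps = (u64)rs->delivered * USEC_PER_SEC / rs->interval_us;
 *
 * skipping invalid samples (negative delivered/interval_us) and optionally
 * discounting those with rs->is_app_limited set, since they understate the
 * available bandwidth.
 */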
struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
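/* Minimal sketch (illustrative, not part of this header): a congestion
 * control module only has to fill in the required hooks; here they simply
 * delegate to the exported Reno helpers above. The "example_reno" name is
 * hypothetical.
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.name		= "example_reno",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 */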
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
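/* Worked example (illustrative): packets_out == 10, sacked_out == 2,
 * lost_out == 1 and retrans_out == 1 give
 *
 *	in_flight = 10 - (2 + 1) + 1 = 8
 *
 * i.e. sent once (10), minus those that left the network (2 SACKed plus
 * 1 presumed lost), plus the 1 retransmission currently in the pipe.
 */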
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited processes to probe bw more
 * aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Return in jiffies the delay before one skb is sent.
 * If @skb is NULL, we look at EDT for next packet being sent on the socket.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk,
					     const struct sk_buff *skb)
{
	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;

	pacing_delay -= tcp_sk(sk)->tcp_clock_cache;

	return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
}
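/* Worked example (illustrative): if the skb's earliest departure time
 * (skb->tstamp) is 5,000,000 ns ahead of tp->tcp_clock_cache, the skb must
 * not leave for another 5 ms, so the helper returns
 * nsecs_to_jiffies(5000000) and the caller arms its timer that much later.
 * A zero or negative delta returns 0, meaning "send immediately".
 */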
static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when,
					const struct sk_buff *skb)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX,
				     NULL);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
	    ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
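/* Worked example (illustrative): with space == 65536,
 *
 *	tcp_adv_win_scale ==  1 -> 65536 - 65536/2 == 32768
 *	tcp_adv_win_scale ==  2 -> 65536 - 65536/4 == 49152
 *	tcp_adv_win_scale == -2 -> 65536/4         == 16384
 *
 * i.e. the sysctl expresses what fraction of the receive buffer is
 * reserved for metadata overhead rather than advertised window.
 */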
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, sk->sk_rcvbuf);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
			  tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
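/* Worked example (illustrative), with paws_win == 0: if ts_recent == 1000,
 * a segment carrying rcv_tsval == 1000 or newer passes the first test,
 * since (s32)(1000 - rcv_tsval) <= 0. With rcv_tsval == 999 the first test
 * fails, and the segment is only accepted if ts_recent is older than
 * TCP_PAWS_24DAYS (stale per-host state) or was never set (ts_recent == 0).
 */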
1474 */ 1475 if (rst && !time_before32(ktime_get_seconds(), 1476 rx_opt->ts_recent_stamp + TCP_PAWS_MSL)) 1477 return false; 1478 return true; 1479 } 1480 1481 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 1482 int mib_idx, u32 *last_oow_ack_time); 1483 1484 static inline void tcp_mib_init(struct net *net) 1485 { 1486 /* See RFC 2012 */ 1487 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1); 1488 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); 1489 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); 1490 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1); 1491 } 1492 1493 /* from STCP */ 1494 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) 1495 { 1496 tp->lost_skb_hint = NULL; 1497 } 1498 1499 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1500 { 1501 tcp_clear_retrans_hints_partial(tp); 1502 tp->retransmit_skb_hint = NULL; 1503 } 1504 1505 union tcp_md5_addr { 1506 struct in_addr a4; 1507 #if IS_ENABLED(CONFIG_IPV6) 1508 struct in6_addr a6; 1509 #endif 1510 }; 1511 1512 /* - key database */ 1513 struct tcp_md5sig_key { 1514 struct hlist_node node; 1515 u8 keylen; 1516 u8 family; /* AF_INET or AF_INET6 */ 1517 union tcp_md5_addr addr; 1518 u8 prefixlen; 1519 u8 key[TCP_MD5SIG_MAXKEYLEN]; 1520 struct rcu_head rcu; 1521 }; 1522 1523 /* - sock block */ 1524 struct tcp_md5sig_info { 1525 struct hlist_head head; 1526 struct rcu_head rcu; 1527 }; 1528 1529 /* - pseudo header */ 1530 struct tcp4_pseudohdr { 1531 __be32 saddr; 1532 __be32 daddr; 1533 __u8 pad; 1534 __u8 protocol; 1535 __be16 len; 1536 }; 1537 1538 struct tcp6_pseudohdr { 1539 struct in6_addr saddr; 1540 struct in6_addr daddr; 1541 __be32 len; 1542 __be32 protocol; /* including padding */ 1543 }; 1544 1545 union tcp_md5sum_block { 1546 struct tcp4_pseudohdr ip4; 1547 #if IS_ENABLED(CONFIG_IPV6) 1548 struct tcp6_pseudohdr ip6; 1549 #endif 1550 }; 1551 1552 /* - pool: digest algorithm, hash description and scratch buffer */ 1553 struct tcp_md5sig_pool { 1554 struct ahash_request *md5_req; 1555 void *scratch; 1556 }; 1557 1558 /* - functions */ 1559 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, 1560 const struct sock *sk, const struct sk_buff *skb); 1561 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 1562 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen, 1563 gfp_t gfp); 1564 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 1565 int family, u8 prefixlen); 1566 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, 1567 const struct sock *addr_sk); 1568 1569 #ifdef CONFIG_TCP_MD5SIG 1570 #include <linux/jump_label.h> 1571 extern struct static_key_false tcp_md5_needed; 1572 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, 1573 const union tcp_md5_addr *addr, 1574 int family); 1575 static inline struct tcp_md5sig_key * 1576 tcp_md5_do_lookup(const struct sock *sk, 1577 const union tcp_md5_addr *addr, 1578 int family) 1579 { 1580 if (!static_branch_unlikely(&tcp_md5_needed)) 1581 return NULL; 1582 return __tcp_md5_do_lookup(sk, addr, family); 1583 } 1584 1585 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) 1586 #else 1587 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 1588 const union tcp_md5_addr *addr, 1589 int family) 1590 { 1591 return NULL; 1592 } 1593 #define tcp_twsk_md5_key(twsk) NULL 1594 #endif 1595 1596 bool tcp_alloc_md5sig_pool(void); 1597 1598 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); 1599 static inline void 
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}
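/* Usage sketch (illustrative), honoring the RCU requirement noted above:
 *
 *	rcu_read_lock();
 *	ctx = tcp_fastopen_get_ctx(sk);
 *	if (ctx)
 *		num_keys = tcp_fastopen_context_len(ctx);
 *	rcu_read_unlock();
 */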
/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}
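
/* The tcp_skb_tsorted_save()/tcp_skb_tsorted_restore() pair above
 * deliberately opens and closes a C block, so callers sandwich the code
 * that must run with skb->_skb_refdst cleared between them. A minimal
 * sketch (the transmit call is illustrative, not a real call site):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = example_xmit(skb);
 *	} tcp_skb_tsorted_restore(skb);
 */
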
/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	sk_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_sk(sk)->transparent;
}
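
/* A minimal sketch of unlinking skbs while walking the write queue; the
 * should_drop() predicate is hypothetical. The _safe iterator variant
 * is what makes the unlink legal mid-walk, and the walk must start from
 * a non-NULL skb:
 *
 *	struct sk_buff *skb = tcp_send_head(sk);
 *	struct sk_buff *tmp;
 *
 *	if (skb) {
 *		tcp_for_write_queue_from_safe(skb, tmp, sk) {
 *			if (should_drop(skb)) {
 *				tcp_unlink_write_queue(skb, sk);
 *				sk_wmem_free_skb(sk, skb);
 *			}
 *		}
 *	}
 */
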
/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t		family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}

/* @wake is one when sk_stream_write_space() calls us.
 * This sends EPOLLOUT only if notsent_bytes is less than half the limit.
 * This mimics the strategy used in sock_def_write_space().
 */
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;

	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
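
/* Worked example of the shift above, with illustrative numbers: if
 * tcp_notsent_lowat() is 128 KB and wake == 1, the stream is reported
 * as having free memory only while notsent_bytes is below 64 KB,
 * because (notsent_bytes << 1) must stay under 128 KB. With wake == 0
 * the full 128 KB budget applies.
 */
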
#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
						const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     char __user *optval,
				     int optlen);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  const struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct sk_buff *skb);
#endif
	void (*init_req)(struct request_sock *req,
			 const struct sock *sk_listener,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
				       const struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type);
};

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}

/*
 * Save and compile IPv4 options, return a pointer to the copy
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}
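
/* Sign convention of tcp_rto_delta_us(), as a worked example with
 * illustrative numbers: if the rtx queue head was last (re)sent 300 ms
 * ago and icsk_rto is 200 ms, the helper returns about -100000 usec.
 * A negative result therefore means the RTO is already overdue and the
 * timer should fire immediately.
 */
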
/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	tp->segs_in += segs_in;
	if (skb->len > tcp_hdrlen(skb))
		tp->data_segs_in += segs_in;
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p);
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p);

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)

struct sk_msg;
struct sk_psock;

int tcp_bpf_init(struct sock *sk);
void tcp_bpf_reinit(struct sock *sk);
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
			  int flags);
int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		    int nonblock, int flags, int *addr_len);
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
		      struct msghdr *msg, int len, int flags);
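
/* A minimal registration sketch for a hypothetical ULP module, loosely
 * modeled on how in-tree ULPs such as net/tls wire themselves up. Every
 * name below other than tcp_ulp_ops, tcp_(un)register_ulp() and
 * MODULE_ALIAS_TCP_ULP() is made up for illustration:
 *
 *	static int example_ulp_init(struct sock *sk)
 *	{
 *		return 0;	(attach per-socket ULP state here)
 *	}
 *
 *	static void example_ulp_release(struct sock *sk)
 *	{
 *	}
 *
 *	static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.init		= example_ulp_init,
 *		.release	= example_ulp_release,
 *	};
 *
 * The module init hook then calls tcp_register_ulp(&example_ulp_ops),
 * the exit hook calls tcp_unregister_ulp(&example_ulp_ops), and
 * MODULE_ALIAS_TCP_ULP("example") lets request_module() find it when
 * userspace sets the TCP_ULP socket option to "example".
 */
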
/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return timeout;
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}
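
/* A minimal sketch of passing arguments through to a BPF_SOCK_OPS
 * program via the 3-arg wrapper. BPF_SOCK_OPS_RTO_CB is a real op, but
 * this call site is illustrative rather than a verbatim kernel one; a
 * negative return means no loaded program handled the op:
 *
 *	ret = tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
 *				icsk->icsk_retransmits,
 *				icsk->icsk_rto, (u32)expired);
 */
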
#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

#endif	/* _TCP_H */