--- tcp.h	(d0ed4c60abfb9a4ab6cd416d1dea9df6266f8fc7)
+++ tcp.h	(40a1227ea845a37ab197dd1caffb60b047fa36b1)
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system.  INET is implemented using the BSD Socket
  *		interface as the means of communication with the user level.
  *
  *		Definitions for the TCP module.
  *
  * Version:	@(#)tcp.h	1.0.5	05/23/93
--- 22 unchanged lines hidden (view full) ---
 #include <linux/kref.h>
 #include <linux/ktime.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 #include <net/inet_hashtables.h>
 #include <net/checksum.h>
 #include <net/request_sock.h>
+#include <net/sock_reuseport.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
 #include <net/dst.h>
 
 #include <linux/seq_file.h>
--- 421 unchanged lines hidden (view full) ---
 #define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 
 /* syncookies: remember time of last synqueue overflow
  * But do not dirty this field too often (once per second is enough)
  * It is racy as we do not hold a lock, but race is very minor.
  */
 static inline void tcp_synq_overflow(const struct sock *sk)
 {
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	unsigned long now = jiffies;
+	unsigned int last_overflow;
+	unsigned int now = jiffies;
 
-	if (time_after(now, last_overflow + HZ))
+	if (sk->sk_reuseport) {
+		struct sock_reuseport *reuse;
+
+		reuse = rcu_dereference(sk->sk_reuseport_cb);
+		if (likely(reuse)) {
+			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+			if (time_after32(now, last_overflow + HZ))
+				WRITE_ONCE(reuse->synq_overflow_ts, now);
+			return;
+		}
+	}
+
+	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	if (time_after32(now, last_overflow + HZ))
 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	unsigned int last_overflow;
+	unsigned int now = jiffies;
 
-	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+	if (sk->sk_reuseport) {
+		struct sock_reuseport *reuse;
+
+		reuse = rcu_dereference(sk->sk_reuseport_cb);
+		if (likely(reuse)) {
+			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+			return time_after32(now, last_overflow +
+					    TCP_SYNCOOKIE_VALID);
+		}
+	}
+
+	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
 {
 	u64 val = get_jiffies_64();
 
 	do_div(val, TCP_SYNCOOKIE_PERIOD);
 	return val;
--- 461 unchanged lines hidden (view full) ---
  * setting cwnd and pacing rate.
  * A sample is invalid if "delivered" or "interval_us" is negative.
  */
 struct rate_sample {
 	u64  prior_mstamp;	/* starting timestamp for interval */
 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 	s32  delivered;		/* number of packets delivered over interval */
 	long interval_us;	/* time for tp->delivered to incr "delivered" */
+	u32 snd_interval_us;	/* snd interval for delivered packets */
+	u32 rcv_interval_us;	/* rcv interval for delivered packets */
 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
 	int  losses;		/* number of packets marked lost upon ACK */
 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
 	u32  prior_in_flight;	/* in flight before this ACK */
 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 	bool is_retrans;	/* is sample from retransmission? */
 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
 };
--- 215 unchanged lines hidden (view full) ---
 
 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
 	if (tcp_in_slow_start(tp))
 		return tp->snd_cwnd < 2 * tp->max_packets_out;
 
 	return tp->is_cwnd_limited;
 }
 
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
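An aside on the helper added just above: tcp_needs_internal_pacing() only answers whether the stack must pace packets itself (SK_PACING_NEEDED is set when no fq-style qdisc is doing it); a caller still has to turn sk->sk_pacing_rate into a per-skb delay. A minimal sketch of such a caller, assuming the usual tcp.h includes and the u32 sk_pacing_rate field of this kernel generation; the function name is illustrative and is not defined in this header:

static void tcp_example_pace_skb(struct sock *sk, const struct sk_buff *skb)
{
	u32 rate = READ_ONCE(sk->sk_pacing_rate);
	u64 len_ns;

	if (!tcp_needs_internal_pacing(sk) || !rate)
		return;

	/* Time this skb occupies on the wire at "rate" bytes per second. */
	len_ns = (u64)skb->len * NSEC_PER_SEC;
	do_div(len_ns, rate);

	/* A real sender would arm a timer for len_ns nanoseconds here and
	 * defer the next transmit until it fires (omitted in this sketch).
	 */
}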
 /* Something is really bad, we could not queue an additional packet,
  * because qdisc is full or receiver sent a 0 window.
  * We do not want to add fuel to the fire, or abort too early,
  * so make sure the timer we arm now is at least 200ms in the future,
  * regardless of current icsk_rto value (as it could be ~2ms)
  */
 static inline unsigned long tcp_probe0_base(const struct sock *sk)
 {
--- 161 unchanged lines hidden (view full) ---
 	return fin_timeout;
 }
 
 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
 				  int paws_win)
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
 		return true;
-	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+	if (unlikely(!time_before32(ktime_get_seconds(),
+				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
 		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 	 * then following tcp messages have valid values. Ignore 0 value,
 	 * or else 'negative' tsval might forbid us to accept their packets.
 	 */
 	if (!rx_opt->ts_recent)
 		return true;
--- 13 unchanged lines hidden (view full) ---
 	   of this constraint to relax it: if peer reboots, clock may go
 	   out-of-sync and half-open connections will not be reset.
 	   Actually, the problem would be not existing if all
 	   the implementations followed draft about maintaining clock
 	   via reboots. Linux-2.2 DOES NOT!
 
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
-	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+	if (rst && !time_before32(ktime_get_seconds(),
+				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
 		return false;
 	return true;
 }
 
 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 			  int mib_idx, u32 *last_oow_ack_time);
 
 static inline void tcp_mib_init(struct net *net)
--- 369 unchanged lines hidden (view full) ---
 
 extern struct request_sock_ops tcp_request_sock_ops;
 extern struct request_sock_ops tcp6_request_sock_ops;
 
 void tcp_v4_destroy_sock(struct sock *sk);
 
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
 int tcp_gro_complete(struct sk_buff *skb);
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
--- 330 unchanged lines hidden ---
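A note on the timestamp hunks above: tcp_synq_overflow(), tcp_synq_no_recent_overflow(), tcp_paws_check() and tcp_paws_reject() all move from unsigned long / get_seconds() comparisons to 32-bit stamps compared with time_after32()/time_before32() (with ktime_get_seconds() deliberately truncated to 32 bits). The point is wrap safety: deciding order from the sign of the 32-bit difference stays correct across a u32 rollover, while a plain ">=" does not. A standalone user-space illustration that mirrors the form of those kernel macros (this is not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's time_after32()/time_before32(): decide order
 * from the sign of the 32-bit difference, so a u32 rollover between the two
 * stamps does not flip the result.
 */
#define ex_time_after32(a, b)  ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
#define ex_time_before32(b, a) ex_time_after32(a, b)

int main(void)
{
	uint32_t last = 0xfffffff0u;	/* stamped shortly before the wrap */
	uint32_t now  = 0x00000010u;	/* 0x20 ticks later, after the wrap */

	/* Wrap-safe form: correctly reports that "now" is later. */
	printf("time_after32(now, last) = %d\n", ex_time_after32(now, last));

	/* Plain ordering check: false here, even though "now" is
	 * logically 32 ticks later.
	 */
	printf("now >= last             = %d\n", now >= last);
	return 0;
}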