1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the TCP module.
8  *
9  * Version:	@(#)tcp.h	1.0.5	05/23/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  */
14 #ifndef _TCP_H
15 #define _TCP_H
16 
17 #define FASTRETRANS_DEBUG 1
18 
19 #include <linux/list.h>
20 #include <linux/tcp.h>
21 #include <linux/bug.h>
22 #include <linux/slab.h>
23 #include <linux/cache.h>
24 #include <linux/percpu.h>
25 #include <linux/skbuff.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/indirect_call_wrapper.h>
29 
30 #include <net/inet_connection_sock.h>
31 #include <net/inet_timewait_sock.h>
32 #include <net/inet_hashtables.h>
33 #include <net/checksum.h>
34 #include <net/request_sock.h>
35 #include <net/sock_reuseport.h>
36 #include <net/sock.h>
37 #include <net/snmp.h>
38 #include <net/ip.h>
39 #include <net/tcp_states.h>
40 #include <net/inet_ecn.h>
41 #include <net/dst.h>
42 #include <net/mptcp.h>
43 
44 #include <linux/seq_file.h>
45 #include <linux/memcontrol.h>
46 #include <linux/bpf-cgroup.h>
47 #include <linux/siphash.h>
48 
49 extern struct inet_hashinfo tcp_hashinfo;
50 
51 DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
52 int tcp_orphan_count_sum(void);
53 
54 void tcp_time_wait(struct sock *sk, int state, int timeo);
55 
56 #define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
57 #define MAX_TCP_OPTION_SPACE 40
58 #define TCP_MIN_SND_MSS		48
59 #define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
60 
61 /*
62  * Never offer a window over 32767 without using window scaling. Some
63  * poor stacks do signed 16bit maths!
64  */
65 #define MAX_TCP_WINDOW		32767U
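
/* Illustrative note (not part of the original source): a peer that reads an
 * unscaled window of e.g. 40000 through a signed 16bit type sees -25536,
 * which is why offers without window scaling are capped at 32767.
 */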
66 
67 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
68 #define TCP_MIN_MSS		88U
69 
70 /* The initial MSS to use as the base for MTU probing */
71 #define TCP_BASE_MSS		1024
72 
73 /* Probing interval, defaults to 10 minutes as per RFC4821 */
74 #define TCP_PROBE_INTERVAL	600
75 
76 /* Threshold (in bytes) on the MTU search range below which probing stops */
77 #define TCP_PROBE_THRESHOLD	8
78 
79 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
80 #define TCP_FASTRETRANS_THRESH 3
81 
82 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
83 #define TCP_MAX_QUICKACKS	16U
84 
85 /* Maximal window scaling shift count according to RFC1323 */
86 #define TCP_MAX_WSCALE		14U
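
/* Illustrative note (not part of the original source): with the maximum
 * shift of 14, the largest advertisable window is 65535 << 14 bytes,
 * i.e. just under 1 GiB, which keeps windows safely below 2^30.
 */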
87 
88 /* urg_data states */
89 #define TCP_URG_VALID	0x0100
90 #define TCP_URG_NOTYET	0x0200
91 #define TCP_URG_READ	0x0400
92 
93 #define TCP_RETR1	3	/*
94 				 * This is how many retries it does before it
95 				 * tries to figure out if the gateway is
96 				 * down. Minimal RFC value is 3; it corresponds
97 				 * to ~3sec-8min depending on RTO.
98 				 */
99 
100 #define TCP_RETR2	15	/*
101 				 * This should take at least
102 				 * 90 minutes to time out.
103 				 * RFC1122 says that the limit is 100 sec.
104 				 * 15 is ~13-30min depending on RTO.
105 				 */
106 
107 #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
108 				 * when actively opening a connection.
109 				 * RFC1122 says the minimum retry MUST
110 				 * be at least 180secs.  Nevertheless
111 				 * this value corresponds to
112 				 * 63secs of retransmission with the
113 				 * current initial RTO.
114 				 */
115 
116 #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
117 				 * when passively opening a connection.
118 				 * This corresponds to 31secs of
119 				 * retransmission with the current
120 				 * initial RTO.
121 				 */
122 
123 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
124 				  * state, about 60 seconds	*/
125 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
126                                  /* BSD style FIN_WAIT2 deadlock breaker.
127 				  * It used to be 3min; the new value is 60sec,
128 				  * to combine FIN-WAIT-2 timeout with
129 				  * TIME-WAIT timer.
130 				  */
131 #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
132 
133 #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
134 #if HZ >= 100
135 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
136 #define TCP_ATO_MIN	((unsigned)(HZ/25))
137 #else
138 #define TCP_DELACK_MIN	4U
139 #define TCP_ATO_MIN	4U
140 #endif
141 #define TCP_RTO_MAX	((unsigned)(120*HZ))
142 #define TCP_RTO_MIN	((unsigned)(HZ/5))
143 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
144 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
145 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
146 						 * used as a fallback RTO for the
147 						 * initial data transmission if no
148 						 * valid RTT sample has been acquired,
149 						 * most likely due to retrans in 3WHS.
150 						 */
151 
152 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
153 					                 * for local resources.
154 					                 */
155 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
156 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
157 #define TCP_KEEPALIVE_INTVL	(75*HZ)
158 
159 #define MAX_TCP_KEEPIDLE	32767
160 #define MAX_TCP_KEEPINTVL	32767
161 #define MAX_TCP_KEEPCNT		127
162 #define MAX_TCP_SYNCNT		127
163 
164 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
165 
166 #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
167 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
168 					 * after this time. It should be equal
169 					 * to (or greater than) TCP_TIMEWAIT_LEN
170 					 * to provide reliability equal to that
171 					 * provided by the timewait state.
172 					 */
173 #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
174 					 * timestamps. It must be less than
175 					 * minimal timewait lifetime.
176 					 */
177 /*
178  *	TCP option
179  */
180 
181 #define TCPOPT_NOP		1	/* Padding */
182 #define TCPOPT_EOL		0	/* End of options */
183 #define TCPOPT_MSS		2	/* Segment size negotiating */
184 #define TCPOPT_WINDOW		3	/* Window scaling */
185 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
186 #define TCPOPT_SACK             5       /* SACK Block */
187 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
188 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
189 #define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
190 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
191 #define TCPOPT_EXP		254	/* Experimental */
192 /* Magic number placed at the start of the option data when sharing the TCP
193  * experimental option codepoints. See draft-ietf-tcpm-experimental-options-00.txt
194  */
195 #define TCPOPT_FASTOPEN_MAGIC	0xF989
196 #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
197 
198 /*
199  *     TCP option lengths
200  */
201 
202 #define TCPOLEN_MSS            4
203 #define TCPOLEN_WINDOW         3
204 #define TCPOLEN_SACK_PERM      2
205 #define TCPOLEN_TIMESTAMP      10
206 #define TCPOLEN_MD5SIG         18
207 #define TCPOLEN_FASTOPEN_BASE  2
208 #define TCPOLEN_EXP_FASTOPEN_BASE  4
209 #define TCPOLEN_EXP_SMC_BASE   6
210 
211 /* But this is what stacks really send out. */
212 #define TCPOLEN_TSTAMP_ALIGNED		12
213 #define TCPOLEN_WSCALE_ALIGNED		4
214 #define TCPOLEN_SACKPERM_ALIGNED	4
215 #define TCPOLEN_SACK_BASE		2
216 #define TCPOLEN_SACK_BASE_ALIGNED	4
217 #define TCPOLEN_SACK_PERBLOCK		8
218 #define TCPOLEN_MD5SIG_ALIGNED		20
219 #define TCPOLEN_MSS_ALIGNED		4
220 #define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
221 
222 /* Flags in tp->nonagle */
223 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
224 #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
225 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
226 
227 /* TCP thin-stream limits */
228 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
229 
230 /* TCP initial congestion window as per rfc6928 */
231 #define TCP_INIT_CWND		10
232 
233 /* Bit Flags for sysctl_tcp_fastopen */
234 #define	TFO_CLIENT_ENABLE	1
235 #define	TFO_SERVER_ENABLE	2
236 #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
237 
238 /* Accept SYN data w/o any cookie option */
239 #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
240 
241 /* Force enable TFO on all listeners, i.e., not requiring the
242  * TCP_FASTOPEN socket option.
243  */
244 #define	TFO_SERVER_WO_SOCKOPT1	0x400
245 
246 
247 /* sysctl variables for tcp */
248 extern int sysctl_tcp_max_orphans;
249 extern long sysctl_tcp_mem[3];
250 
251 #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
252 #define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
253 #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
254 
255 extern atomic_long_t tcp_memory_allocated;
256 DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
257 
258 extern struct percpu_counter tcp_sockets_allocated;
259 extern unsigned long tcp_memory_pressure;
260 
261 /* optimized version of sk_under_memory_pressure() for TCP sockets */
262 static inline bool tcp_under_memory_pressure(const struct sock *sk)
263 {
264 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
265 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
266 		return true;
267 
268 	return READ_ONCE(tcp_memory_pressure);
269 }
270 /*
271  * The next routines deal with comparing 32 bit unsigned ints
272  * and worry about wraparound (automatic with unsigned arithmetic).
273  */
274 
275 static inline bool before(__u32 seq1, __u32 seq2)
276 {
277         return (__s32)(seq1-seq2) < 0;
278 }
279 #define after(seq2, seq1) 	before(seq1, seq2)
280 
281 /* is s2<=s1<=s3 ? */
282 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
283 {
284 	return seq3 - seq2 >= seq1 - seq2;
285 }
286 
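/* Illustrative example (not part of the original source): before()/after()
 * give wraparound-safe ordering, e.g. a sequence number just past the 2^32
 * wrap is still "after" one just before it. A window check built on them:
 */
static inline bool tcp_seq_in_window_example(u32 seq, u32 rcv_nxt, u32 rcv_wnd)
{
	/* true iff rcv_nxt <= seq < rcv_nxt + rcv_wnd, modulo 2^32 */
	return !before(seq, rcv_nxt) && before(seq, rcv_nxt + rcv_wnd);
}
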
287 static inline bool tcp_out_of_memory(struct sock *sk)
288 {
289 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
290 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
291 		return true;
292 	return false;
293 }
294 
295 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
296 {
297 	sk_wmem_queued_add(sk, -skb->truesize);
298 	if (!skb_zcopy_pure(skb))
299 		sk_mem_uncharge(sk, skb->truesize);
300 	else
301 		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
302 	__kfree_skb(skb);
303 }
304 
305 void sk_forced_mem_schedule(struct sock *sk, int size);
306 
307 bool tcp_check_oom(struct sock *sk, int shift);
308 
309 
310 extern struct proto tcp_prot;
311 
312 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
313 #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
314 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
315 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
316 
317 void tcp_tasklet_init(void);
318 
319 int tcp_v4_err(struct sk_buff *skb, u32);
320 
321 void tcp_shutdown(struct sock *sk, int how);
322 
323 int tcp_v4_early_demux(struct sk_buff *skb);
324 int tcp_v4_rcv(struct sk_buff *skb);
325 
326 void tcp_remove_empty_skb(struct sock *sk);
327 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
328 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
329 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
330 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
331 			 size_t size, struct ubuf_info *uarg);
332 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
333 		 int flags);
334 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
335 			size_t size, int flags);
336 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
337 		 size_t size, int flags);
338 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
339 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
340 	      int size_goal);
341 void tcp_release_cb(struct sock *sk);
342 void tcp_wfree(struct sk_buff *skb);
343 void tcp_write_timer_handler(struct sock *sk);
344 void tcp_delack_timer_handler(struct sock *sk);
345 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
346 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
347 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
348 void tcp_rcv_space_adjust(struct sock *sk);
349 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
350 void tcp_twsk_destructor(struct sock *sk);
351 void tcp_twsk_purge(struct list_head *net_exit_list, int family);
352 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
353 			struct pipe_inode_info *pipe, size_t len,
354 			unsigned int flags);
355 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
356 				     bool force_schedule);
357 
358 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
359 static inline void tcp_dec_quickack_mode(struct sock *sk,
360 					 const unsigned int pkts)
361 {
362 	struct inet_connection_sock *icsk = inet_csk(sk);
363 
364 	if (icsk->icsk_ack.quick) {
365 		if (pkts >= icsk->icsk_ack.quick) {
366 			icsk->icsk_ack.quick = 0;
367 			/* Leaving quickack mode we deflate ATO. */
368 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
369 		} else
370 			icsk->icsk_ack.quick -= pkts;
371 	}
372 }
373 
374 #define	TCP_ECN_OK		1
375 #define	TCP_ECN_QUEUE_CWR	2
376 #define	TCP_ECN_DEMAND_CWR	4
377 #define	TCP_ECN_SEEN		8
378 
379 enum tcp_tw_status {
380 	TCP_TW_SUCCESS = 0,
381 	TCP_TW_RST = 1,
382 	TCP_TW_ACK = 2,
383 	TCP_TW_SYN = 3
384 };
385 
386 
387 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
388 					      struct sk_buff *skb,
389 					      const struct tcphdr *th);
390 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
391 			   struct request_sock *req, bool fastopen,
392 			   bool *lost_race);
393 int tcp_child_process(struct sock *parent, struct sock *child,
394 		      struct sk_buff *skb);
395 void tcp_enter_loss(struct sock *sk);
396 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
397 void tcp_clear_retrans(struct tcp_sock *tp);
398 void tcp_update_metrics(struct sock *sk);
399 void tcp_init_metrics(struct sock *sk);
400 void tcp_metrics_init(void);
401 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
402 void __tcp_close(struct sock *sk, long timeout);
403 void tcp_close(struct sock *sk, long timeout);
404 void tcp_init_sock(struct sock *sk);
405 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
406 __poll_t tcp_poll(struct file *file, struct socket *sock,
407 		      struct poll_table_struct *wait);
408 int do_tcp_getsockopt(struct sock *sk, int level,
409 		      int optname, sockptr_t optval, sockptr_t optlen);
410 int tcp_getsockopt(struct sock *sk, int level, int optname,
411 		   char __user *optval, int __user *optlen);
412 bool tcp_bpf_bypass_getsockopt(int level, int optname);
413 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
414 		      sockptr_t optval, unsigned int optlen);
415 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
416 		   unsigned int optlen);
417 void tcp_set_keepalive(struct sock *sk, int val);
418 void tcp_syn_ack_timeout(const struct request_sock *req);
419 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
420 		int flags, int *addr_len);
421 int tcp_set_rcvlowat(struct sock *sk, int val);
422 int tcp_set_window_clamp(struct sock *sk, int val);
423 void tcp_update_recv_tstamps(struct sk_buff *skb,
424 			     struct scm_timestamping_internal *tss);
425 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
426 			struct scm_timestamping_internal *tss);
427 void tcp_data_ready(struct sock *sk);
428 #ifdef CONFIG_MMU
429 int tcp_mmap(struct file *file, struct socket *sock,
430 	     struct vm_area_struct *vma);
431 #endif
432 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
433 		       struct tcp_options_received *opt_rx,
434 		       int estab, struct tcp_fastopen_cookie *foc);
435 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
436 
437 /*
438  *	BPF SKB-less helpers
439  */
440 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
441 			 struct tcphdr *th, u32 *cookie);
442 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
443 			 struct tcphdr *th, u32 *cookie);
444 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
445 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
446 			  const struct tcp_request_sock_ops *af_ops,
447 			  struct sock *sk, struct tcphdr *th);
448 /*
449  *	TCP v4 functions exported for the inet6 API
450  */
451 
452 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
453 void tcp_v4_mtu_reduced(struct sock *sk);
454 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
455 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
456 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
457 struct sock *tcp_create_openreq_child(const struct sock *sk,
458 				      struct request_sock *req,
459 				      struct sk_buff *skb);
460 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
461 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
462 				  struct request_sock *req,
463 				  struct dst_entry *dst,
464 				  struct request_sock *req_unhash,
465 				  bool *own_req);
466 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
467 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
468 int tcp_connect(struct sock *sk);
469 enum tcp_synack_type {
470 	TCP_SYNACK_NORMAL,
471 	TCP_SYNACK_FASTOPEN,
472 	TCP_SYNACK_COOKIE,
473 };
474 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
475 				struct request_sock *req,
476 				struct tcp_fastopen_cookie *foc,
477 				enum tcp_synack_type synack_type,
478 				struct sk_buff *syn_skb);
479 int tcp_disconnect(struct sock *sk, int flags);
480 
481 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
482 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
483 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
484 
485 /* From syncookies.c */
486 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
487 				 struct request_sock *req,
488 				 struct dst_entry *dst, u32 tsoff);
489 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
490 		      u32 cookie);
491 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
492 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
493 					    const struct tcp_request_sock_ops *af_ops,
494 					    struct sock *sk, struct sk_buff *skb);
495 #ifdef CONFIG_SYN_COOKIES
496 
497 /* Syncookies use a monotonic timer which increments every 60 seconds.
498  * This counter is used both as a hash input and partially encoded into
499  * the cookie value.  A cookie is only validated further if the delta
500  * between the current counter value and the encoded one is less than this,
501  * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
502  * the counter advances immediately after a cookie is generated).
503  */
504 #define MAX_SYNCOOKIE_AGE	2
505 #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
506 #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
507 
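/* Illustrative sketch (not part of the original source): a cookie whose
 * encoded counter lags the current tcp_cookie_time() value by
 * MAX_SYNCOOKIE_AGE or more is stale and must not be accepted.
 */
static inline bool tcp_cookie_counter_fresh_example(u32 encoded_count,
						    u32 now_count)
{
	return (now_count - encoded_count) < MAX_SYNCOOKIE_AGE;
}
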
508 /* syncookies: remember time of last synqueue overflow
509  * But do not dirty this field too often (once per second is enough)
510  * It is racy as we do not hold a lock, but the race is very minor.
511  */
512 static inline void tcp_synq_overflow(const struct sock *sk)
513 {
514 	unsigned int last_overflow;
515 	unsigned int now = jiffies;
516 
517 	if (sk->sk_reuseport) {
518 		struct sock_reuseport *reuse;
519 
520 		reuse = rcu_dereference(sk->sk_reuseport_cb);
521 		if (likely(reuse)) {
522 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
523 			if (!time_between32(now, last_overflow,
524 					    last_overflow + HZ))
525 				WRITE_ONCE(reuse->synq_overflow_ts, now);
526 			return;
527 		}
528 	}
529 
530 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
531 	if (!time_between32(now, last_overflow, last_overflow + HZ))
532 		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
533 }
534 
535 /* syncookies: no recent synqueue overflow on this listening socket? */
536 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
537 {
538 	unsigned int last_overflow;
539 	unsigned int now = jiffies;
540 
541 	if (sk->sk_reuseport) {
542 		struct sock_reuseport *reuse;
543 
544 		reuse = rcu_dereference(sk->sk_reuseport_cb);
545 		if (likely(reuse)) {
546 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
547 			return !time_between32(now, last_overflow - HZ,
548 					       last_overflow +
549 					       TCP_SYNCOOKIE_VALID);
550 		}
551 	}
552 
553 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
554 
555 	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
556 	 * then we're under synflood. However, we have to use
557 	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
558 	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
559 	 * jiffies but before we store .ts_recent_stamp into last_overflow,
560 	 * which could lead to rejecting a valid syncookie.
561 	 */
562 	return !time_between32(now, last_overflow - HZ,
563 			       last_overflow + TCP_SYNCOOKIE_VALID);
564 }
565 
566 static inline u32 tcp_cookie_time(void)
567 {
568 	u64 val = get_jiffies_64();
569 
570 	do_div(val, TCP_SYNCOOKIE_PERIOD);
571 	return val;
572 }
573 
574 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
575 			      u16 *mssp);
576 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
577 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
578 bool cookie_timestamp_decode(const struct net *net,
579 			     struct tcp_options_received *opt);
580 bool cookie_ecn_ok(const struct tcp_options_received *opt,
581 		   const struct net *net, const struct dst_entry *dst);
582 
583 /* From net/ipv6/syncookies.c */
584 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
585 		      u32 cookie);
586 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
587 
588 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
589 			      const struct tcphdr *th, u16 *mssp);
590 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
591 #endif
592 /* tcp_output.c */
593 
594 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
595 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
596 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
597 			       int nonagle);
598 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
599 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
600 void tcp_retransmit_timer(struct sock *sk);
601 void tcp_xmit_retransmit_queue(struct sock *);
602 void tcp_simple_retransmit(struct sock *);
603 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
604 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
605 enum tcp_queue {
606 	TCP_FRAG_IN_WRITE_QUEUE,
607 	TCP_FRAG_IN_RTX_QUEUE,
608 };
609 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
610 		 struct sk_buff *skb, u32 len,
611 		 unsigned int mss_now, gfp_t gfp);
612 
613 void tcp_send_probe0(struct sock *);
614 void tcp_send_partial(struct sock *);
615 int tcp_write_wakeup(struct sock *, int mib);
616 void tcp_send_fin(struct sock *sk);
617 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
618 int tcp_send_synack(struct sock *);
619 void tcp_push_one(struct sock *, unsigned int mss_now);
620 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
621 void tcp_send_ack(struct sock *sk);
622 void tcp_send_delayed_ack(struct sock *sk);
623 void tcp_send_loss_probe(struct sock *sk);
624 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
625 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
626 			     const struct sk_buff *next_skb);
627 
628 /* tcp_input.c */
629 void tcp_rearm_rto(struct sock *sk);
630 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
631 void tcp_reset(struct sock *sk, struct sk_buff *skb);
632 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
633 void tcp_fin(struct sock *sk);
634 void tcp_check_space(struct sock *sk);
635 
636 /* tcp_timer.c */
637 void tcp_init_xmit_timers(struct sock *);
638 static inline void tcp_clear_xmit_timers(struct sock *sk)
639 {
640 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
641 		__sock_put(sk);
642 
643 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
644 		__sock_put(sk);
645 
646 	inet_csk_clear_xmit_timers(sk);
647 }
648 
649 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
650 unsigned int tcp_current_mss(struct sock *sk);
651 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
652 
653 /* Bound MSS / TSO packet size with half of the window */
654 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
655 {
656 	int cutoff;
657 
658 	/* When peer uses tiny windows, there is no use in packetizing
659 	 * to sub-MSS pieces for the sake of SWS or making sure there
660 	 * are enough packets in the pipe for fast recovery.
661 	 *
662 	 * On the other hand, for extremely large MSS devices, handling
663 	 * smaller than MSS windows in this way does make sense.
664 	 */
665 	if (tp->max_window > TCP_MSS_DEFAULT)
666 		cutoff = (tp->max_window >> 1);
667 	else
668 		cutoff = tp->max_window;
669 
670 	if (cutoff && pktsize > cutoff)
671 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
672 	else
673 		return pktsize;
674 }
675 
676 /* tcp.c */
677 void tcp_get_info(struct sock *, struct tcp_info *);
678 
679 /* Read 'sendfile()'-style from a TCP socket */
680 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
681 		  sk_read_actor_t recv_actor);
682 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
683 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
684 void tcp_read_done(struct sock *sk, size_t len);
685 
686 void tcp_initialize_rcv_mss(struct sock *sk);
687 
688 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
689 int tcp_mss_to_mtu(struct sock *sk, int mss);
690 void tcp_mtup_init(struct sock *sk);
691 
692 static inline void tcp_bound_rto(const struct sock *sk)
693 {
694 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
695 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
696 }
697 
698 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
699 {
700 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
701 }
702 
703 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
704 {
705 	/* mptcp hooks are only on the slow path */
706 	if (sk_is_mptcp((struct sock *)tp))
707 		return;
708 
709 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
710 			       ntohl(TCP_FLAG_ACK) |
711 			       snd_wnd);
712 }
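
/* Illustrative note (not part of the original source): pred_flags mirrors the
 * fourth 32bit word of the expected TCP header (data offset, ACK flag and
 * unscaled window), so the receive fast path can validate a header-prediction
 * candidate with a single masked 32bit compare.
 */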
713 
714 static inline void tcp_fast_path_on(struct tcp_sock *tp)
715 {
716 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
717 }
718 
719 static inline void tcp_fast_path_check(struct sock *sk)
720 {
721 	struct tcp_sock *tp = tcp_sk(sk);
722 
723 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
724 	    tp->rcv_wnd &&
725 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
726 	    !tp->urg_data)
727 		tcp_fast_path_on(tp);
728 }
729 
730 /* Compute the actual rto_min value */
731 static inline u32 tcp_rto_min(struct sock *sk)
732 {
733 	const struct dst_entry *dst = __sk_dst_get(sk);
734 	u32 rto_min = inet_csk(sk)->icsk_rto_min;
735 
736 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
737 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
738 	return rto_min;
739 }
740 
741 static inline u32 tcp_rto_min_us(struct sock *sk)
742 {
743 	return jiffies_to_usecs(tcp_rto_min(sk));
744 }
745 
746 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
747 {
748 	return dst_metric_locked(dst, RTAX_CC_ALGO);
749 }
750 
751 /* Minimum RTT in usec. ~0 means not available. */
752 static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
753 {
754 	return minmax_get(&tp->rtt_min);
755 }
756 
757 /* Compute the actual receive window we are currently advertising.
758  * Rcv_nxt can be after the window if our peer pushes more data
759  * than the offered window.
760  */
761 static inline u32 tcp_receive_window(const struct tcp_sock *tp)
762 {
763 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
764 
765 	if (win < 0)
766 		win = 0;
767 	return (u32) win;
768 }
769 
770 /* Choose a new window, without checks for shrinking, and without
771  * scaling applied to the result.  The caller does these things
772  * if necessary.  This is a "raw" window selection.
773  */
774 u32 __tcp_select_window(struct sock *sk);
775 
776 void tcp_send_window_probe(struct sock *sk);
777 
778 /* TCP uses 32bit jiffies to save some space.
779  * Note that this is different from tcp_time_stamp, which
780  * historically was the same until linux-4.13.
781  */
782 #define tcp_jiffies32 ((u32)jiffies)
783 
784 /*
785  * Deliver a 32bit value for TCP timestamp option (RFC 7323)
786  * It is no longer tied to jiffies, but to a 1 ms clock.
787  * Note: double check if you want to use tcp_jiffies32 instead of this.
788  */
789 #define TCP_TS_HZ	1000
790 
791 static inline u64 tcp_clock_ns(void)
792 {
793 	return ktime_get_ns();
794 }
795 
796 static inline u64 tcp_clock_us(void)
797 {
798 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
799 }
800 
801 /* This should only be used in contexts where tp->tcp_mstamp is up to date */
802 static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
803 {
804 	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
805 }
806 
807 /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
808 static inline u32 tcp_ns_to_ts(u64 ns)
809 {
810 	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
811 }
812 
813 /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
814 static inline u32 tcp_time_stamp_raw(void)
815 {
816 	return tcp_ns_to_ts(tcp_clock_ns());
817 }
818 
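/* Illustrative sketch (not part of the original source): with TCP_TS_HZ at
 * 1000, the distance between the current TSval clock and an echoed TSecr is
 * directly an RTT estimate in milliseconds.
 */
static inline u32 tcp_tsecr_rtt_ms_example(u32 tsecr)
{
	return tcp_time_stamp_raw() - tsecr;
}
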
819 void tcp_mstamp_refresh(struct tcp_sock *tp);
820 
821 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
822 {
823 	return max_t(s64, t1 - t0, 0);
824 }
825 
826 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
827 {
828 	return tcp_ns_to_ts(skb->skb_mstamp_ns);
829 }
830 
831 /* provide the departure time in usec */
832 static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
833 {
834 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
835 }
836 
837 
838 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
839 
840 #define TCPHDR_FIN 0x01
841 #define TCPHDR_SYN 0x02
842 #define TCPHDR_RST 0x04
843 #define TCPHDR_PSH 0x08
844 #define TCPHDR_ACK 0x10
845 #define TCPHDR_URG 0x20
846 #define TCPHDR_ECE 0x40
847 #define TCPHDR_CWR 0x80
848 
849 #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
850 
851 /* This is what the send packet queuing engine uses to pass
852  * TCP per-packet control information to the transmission code.
853  * We also store the host-order sequence numbers in here.
854  * This is 44 bytes if IPV6 is enabled.
855  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
856  */
857 struct tcp_skb_cb {
858 	__u32		seq;		/* Starting sequence number	*/
859 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
860 	union {
861 		/* Note : tcp_tw_isn is used in input path only
862 		 *	  (isn chosen by tcp_timewait_state_process())
863 		 *
864 		 * 	  tcp_gso_segs/size are used in write queue only,
865 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
866 		 */
867 		__u32		tcp_tw_isn;
868 		struct {
869 			u16	tcp_gso_segs;
870 			u16	tcp_gso_size;
871 		};
872 	};
873 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
874 
875 	__u8		sacked;		/* State flags for SACK.	*/
876 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
877 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
878 #define TCPCB_LOST		0x04	/* SKB is lost			*/
879 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
880 #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
881 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
882 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
883 				TCPCB_REPAIRED)
884 
885 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
886 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
887 			eor:1,		/* Is skb MSG_EOR marked? */
888 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
889 			unused:5;
890 	__u32		ack_seq;	/* Sequence number ACK'd	*/
891 	union {
892 		struct {
893 #define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
894 			/* There is space for up to 24 bytes */
895 			__u32 is_app_limited:1, /* cwnd not fully used? */
896 			      delivered_ce:20,
897 			      unused:11;
898 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
899 			__u32 delivered;
900 			/* start of send pipeline phase */
901 			u64 first_tx_mstamp;
902 			/* when we reached the "delivered" count */
903 			u64 delivered_mstamp;
904 		} tx;   /* only used for outgoing skbs */
905 		union {
906 			struct inet_skb_parm	h4;
907 #if IS_ENABLED(CONFIG_IPV6)
908 			struct inet6_skb_parm	h6;
909 #endif
910 		} header;	/* For incoming skbs */
911 	};
912 };
913 
914 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
915 
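/* Illustrative sketch (not part of the original source): the data length
 * carried by a queued skb can be recovered from its control block, since
 * end_seq also accounts for SYN and FIN.
 */
static inline u32 tcp_skb_data_len_example(const struct sk_buff *skb)
{
	const struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	return tcb->end_seq - tcb->seq -
	       !!(tcb->tcp_flags & TCPHDR_SYN) -
	       !!(tcb->tcp_flags & TCPHDR_FIN);
}
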
916 extern const struct inet_connection_sock_af_ops ipv4_specific;
917 
918 #if IS_ENABLED(CONFIG_IPV6)
919 /* This is the variant of inet6_iif() that must be used by TCP,
920  * as TCP moves IP6CB into a different location in skb->cb[]
921  */
922 static inline int tcp_v6_iif(const struct sk_buff *skb)
923 {
924 	return TCP_SKB_CB(skb)->header.h6.iif;
925 }
926 
927 static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
928 {
929 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
930 
931 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
932 }
933 
934 /* TCP_SKB_CB reference means this can not be used from early demux */
935 static inline int tcp_v6_sdif(const struct sk_buff *skb)
936 {
937 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
938 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
939 		return TCP_SKB_CB(skb)->header.h6.iif;
940 #endif
941 	return 0;
942 }
943 
944 extern const struct inet_connection_sock_af_ops ipv6_specific;
945 
946 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
947 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
948 void tcp_v6_early_demux(struct sk_buff *skb);
949 
950 #endif
951 
952 /* TCP_SKB_CB reference means this can not be used from early demux */
953 static inline int tcp_v4_sdif(struct sk_buff *skb)
954 {
955 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
956 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
957 		return TCP_SKB_CB(skb)->header.h4.iif;
958 #endif
959 	return 0;
960 }
961 
962 /* Due to TSO, an SKB can be composed of multiple actual
963  * packets.  To keep these tracked properly, we use this.
964  */
965 static inline int tcp_skb_pcount(const struct sk_buff *skb)
966 {
967 	return TCP_SKB_CB(skb)->tcp_gso_segs;
968 }
969 
970 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
971 {
972 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
973 }
974 
975 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
976 {
977 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
978 }
979 
980 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
981 static inline int tcp_skb_mss(const struct sk_buff *skb)
982 {
983 	return TCP_SKB_CB(skb)->tcp_gso_size;
984 }
985 
986 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
987 {
988 	return likely(!TCP_SKB_CB(skb)->eor);
989 }
990 
991 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
992 					const struct sk_buff *from)
993 {
994 	return likely(tcp_skb_can_collapse_to(to) &&
995 		      mptcp_skb_can_collapse(to, from) &&
996 		      skb_pure_zcopy_same(to, from));
997 }
998 
999 /* Events passed to congestion control interface */
1000 enum tcp_ca_event {
1001 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
1002 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
1003 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
1004 	CA_EVENT_LOSS,		/* loss timeout */
1005 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1006 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
1007 };
1008 
1009 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1010 enum tcp_ca_ack_event_flags {
1011 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1012 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1013 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1014 };
1015 
1016 /*
1017  * Interface for adding new TCP congestion control handlers
1018  */
1019 #define TCP_CA_NAME_MAX	16
1020 #define TCP_CA_MAX	128
1021 #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1022 
1023 #define TCP_CA_UNSPEC	0
1024 
1025 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1026 #define TCP_CONG_NON_RESTRICTED 0x1
1027 /* Requires ECN/ECT set on all packets */
1028 #define TCP_CONG_NEEDS_ECN	0x2
1029 #define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1030 
1031 union tcp_cc_info;
1032 
1033 struct ack_sample {
1034 	u32 pkts_acked;
1035 	s32 rtt_us;
1036 	u32 in_flight;
1037 };
1038 
1039 /* A rate sample measures the number of (original/retransmitted) data
1040  * packets delivered "delivered" over an interval of time "interval_us".
1041  * The tcp_rate.c code fills in the rate sample, and congestion
1042  * control modules that define a cong_control function to run at the end
1043  * of ACK processing can optionally choose to consult this sample when
1044  * setting cwnd and pacing rate.
1045  * A sample is invalid if "delivered" or "interval_us" is negative.
1046  */
1047 struct rate_sample {
1048 	u64  prior_mstamp; /* starting timestamp for interval */
1049 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1050 	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1051 	s32  delivered;		/* number of packets delivered over interval */
1052 	s32  delivered_ce;	/* number of packets delivered w/ CE marks */
1053 	long interval_us;	/* time for tp->delivered to incr "delivered" */
1054 	u32 snd_interval_us;	/* snd interval for delivered packets */
1055 	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1056 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1057 	int  losses;		/* number of packets marked lost upon ACK */
1058 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1059 	u32  prior_in_flight;	/* in flight before this ACK */
1060 	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
1061 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1062 	bool is_retrans;	/* is sample from retransmission? */
1063 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1064 };
1065 
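/* Illustrative sketch (not part of the original source): a cong_control
 * module could turn a valid rate sample into a delivery rate in bytes per
 * second; 'mss' stands in for the connection's current MSS and div64_u64()
 * comes from <linux/math64.h>. Overflow is not a concern for realistic
 * per-ACK sample sizes.
 */
static inline u64 tcp_rate_bytes_per_sec_example(const struct rate_sample *rs,
						 u32 mss)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample, see the comment above */

	return div64_u64((u64)rs->delivered * mss * USEC_PER_SEC,
			 rs->interval_us);
}
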
1066 struct tcp_congestion_ops {
1067 /* fast path fields are put first to fill one cache line */
1068 
1069 	/* return slow start threshold (required) */
1070 	u32 (*ssthresh)(struct sock *sk);
1071 
1072 	/* do new cwnd calculation (required) */
1073 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1074 
1075 	/* call before changing ca_state (optional) */
1076 	void (*set_state)(struct sock *sk, u8 new_state);
1077 
1078 	/* call when cwnd event occurs (optional) */
1079 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1080 
1081 	/* call when ack arrives (optional) */
1082 	void (*in_ack_event)(struct sock *sk, u32 flags);
1083 
1084 	/* hook for packet ack accounting (optional) */
1085 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1086 
1087 	/* override sysctl_tcp_min_tso_segs */
1088 	u32 (*min_tso_segs)(struct sock *sk);
1089 
1090 	/* call when packets are delivered to update cwnd and pacing rate,
1091 	 * after all the ca_state processing. (optional)
1092 	 */
1093 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1094 
1095 
1096 	/* new value of cwnd after loss (required) */
1097 	u32  (*undo_cwnd)(struct sock *sk);
1098 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1099 	u32 (*sndbuf_expand)(struct sock *sk);
1100 
1101 /* control/slow paths put last */
1102 	/* get info for inet_diag (optional) */
1103 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1104 			   union tcp_cc_info *info);
1105 
1106 	char 			name[TCP_CA_NAME_MAX];
1107 	struct module		*owner;
1108 	struct list_head	list;
1109 	u32			key;
1110 	u32			flags;
1111 
1112 	/* initialize private data (optional) */
1113 	void (*init)(struct sock *sk);
1114 	/* cleanup private data  (optional) */
1115 	void (*release)(struct sock *sk);
1116 } ____cacheline_aligned_in_smp;
1117 
1118 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1119 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1120 int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1121 				  struct tcp_congestion_ops *old_type);
1122 int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1123 
1124 void tcp_assign_congestion_control(struct sock *sk);
1125 void tcp_init_congestion_control(struct sock *sk);
1126 void tcp_cleanup_congestion_control(struct sock *sk);
1127 int tcp_set_default_congestion_control(struct net *net, const char *name);
1128 void tcp_get_default_congestion_control(struct net *net, char *name);
1129 void tcp_get_available_congestion_control(char *buf, size_t len);
1130 void tcp_get_allowed_congestion_control(char *buf, size_t len);
1131 int tcp_set_allowed_congestion_control(char *allowed);
1132 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1133 			       bool cap_net_admin);
1134 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1135 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1136 
1137 u32 tcp_reno_ssthresh(struct sock *sk);
1138 u32 tcp_reno_undo_cwnd(struct sock *sk);
1139 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1140 extern struct tcp_congestion_ops tcp_reno;
1141 
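/* Illustrative sketch (not part of the original source): the smallest
 * possible congestion control module wires the required hooks to the
 * exported Reno helpers and registers itself at module init time. Kept
 * under #if 0 since a header is no place for real module code.
 */
#if 0
static struct tcp_congestion_ops tcp_example_ca __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_reno_cong_avoid,
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_ca_init(void)
{
	return tcp_register_congestion_control(&tcp_example_ca);
}

static void __exit tcp_example_ca_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example_ca);
}

module_init(tcp_example_ca_init);
module_exit(tcp_example_ca_exit);
#endif
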
1142 struct tcp_congestion_ops *tcp_ca_find(const char *name);
1143 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1144 u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1145 #ifdef CONFIG_INET
1146 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1147 #else
1148 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1149 {
1150 	return NULL;
1151 }
1152 #endif
1153 
1154 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1155 {
1156 	const struct inet_connection_sock *icsk = inet_csk(sk);
1157 
1158 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1159 }
1160 
1161 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1162 {
1163 	const struct inet_connection_sock *icsk = inet_csk(sk);
1164 
1165 	if (icsk->icsk_ca_ops->cwnd_event)
1166 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1167 }
1168 
1169 /* From tcp_cong.c */
1170 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1171 
1172 /* From tcp_rate.c */
1173 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1174 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1175 			    struct rate_sample *rs);
1176 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1177 		  bool is_sack_reneg, struct rate_sample *rs);
1178 void tcp_rate_check_app_limited(struct sock *sk);
1179 
1180 static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1181 {
1182 	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1183 }
1184 
1185 /* These functions determine how the current flow behaves with respect to SACK
1186  * handling. SACK is negotiated with the peer, and therefore it can vary
1187  * between different flows.
1188  *
1189  * tcp_is_sack - SACK enabled
1190  * tcp_is_reno - No SACK
1191  */
1192 static inline int tcp_is_sack(const struct tcp_sock *tp)
1193 {
1194 	return likely(tp->rx_opt.sack_ok);
1195 }
1196 
1197 static inline bool tcp_is_reno(const struct tcp_sock *tp)
1198 {
1199 	return !tcp_is_sack(tp);
1200 }
1201 
1202 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1203 {
1204 	return tp->sacked_out + tp->lost_out;
1205 }
1206 
1207 /* This determines how many packets are "in the network" to the best
1208  * of our knowledge.  In many cases it is conservative, but where
1209  * detailed information is available from the receiver (via SACK
1210  * blocks etc.) we can make more aggressive calculations.
1211  *
1212  * Use this for decisions involving congestion control; use just
1213  * tp->packets_out to determine whether the send queue is empty or not.
1214  *
1215  * Read this equation as:
1216  *
1217  *	"Packets sent once on transmission queue" MINUS
1218  *	"Packets left network, but not honestly ACKed yet" PLUS
1219  *	"Packets fast retransmitted"
1220  */
1221 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1222 {
1223 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1224 }
1225 
1226 #define TCP_INFINITE_SSTHRESH	0x7fffffff
1227 
1228 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1229 {
1230 	return tp->snd_cwnd;
1231 }
1232 
1233 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1234 {
1235 	WARN_ON_ONCE((int)val <= 0);
1236 	tp->snd_cwnd = val;
1237 }
1238 
1239 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1240 {
1241 	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1242 }
1243 
1244 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1245 {
1246 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1247 }
1248 
1249 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1250 {
1251 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1252 	       (1 << inet_csk(sk)->icsk_ca_state);
1253 }
1254 
1255 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1256  * The exception is the cwnd reduction phase, when cwnd is decreasing towards
1257  * ssthresh.
1258  */
1259 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1260 {
1261 	const struct tcp_sock *tp = tcp_sk(sk);
1262 
1263 	if (tcp_in_cwnd_reduction(sk))
1264 		return tp->snd_ssthresh;
1265 	else
1266 		return max(tp->snd_ssthresh,
1267 			   ((tcp_snd_cwnd(tp) >> 1) +
1268 			    (tcp_snd_cwnd(tp) >> 2)));
1269 }
1270 
1271 /* Use define here intentionally to get WARN_ON location shown at the caller */
1272 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1273 
1274 void tcp_enter_cwr(struct sock *sk);
1275 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1276 
1277 /* The maximum number of MSS of available cwnd for which TSO defers
1278  * sending if not using sysctl_tcp_tso_win_divisor.
1279  */
1280 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1281 {
1282 	return 3;
1283 }
1284 
1285 /* Returns end sequence number of the receiver's advertised window */
1286 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1287 {
1288 	return tp->snd_una + tp->snd_wnd;
1289 }
1290 
1291 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1292  * flexible approach. The RFC suggests cwnd should not be raised unless
1293  * it was fully used previously. And that's exactly what we do in
1294  * congestion avoidance mode. But in slow start we allow cwnd to grow
1295  * as long as the application has used half the cwnd.
1296  * Example :
1297  *    cwnd is 10 (IW10), but application sends 9 frames.
1298  *    We allow cwnd to reach 18 when all frames are ACKed.
1299  * This check is safe because it's as aggressive as slow start which already
1300  * risks 100% overshoot. The advantage is that we discourage applications
1301  * from sending filler packets or extra data to artificially blow up cwnd
1302  * usage, and allow an application-limited process to probe bandwidth more aggressively.
1303  */
1304 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1305 {
1306 	const struct tcp_sock *tp = tcp_sk(sk);
1307 
1308 	if (tp->is_cwnd_limited)
1309 		return true;
1310 
1311 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1312 	if (tcp_in_slow_start(tp))
1313 		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1314 
1315 	return false;
1316 }
1317 
1318 /* BBR congestion control needs pacing.
1319  * Same remark for SO_MAX_PACING_RATE.
1320  * The sch_fq packet scheduler handles pacing efficiently,
1321  * but is not always installed/used.
1322  * Return true if TCP stack should pace packets itself.
1323  */
1324 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1325 {
1326 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1327 }
1328 
1329 /* Estimate, in jiffies, how long until the next packet for this flow can be sent.
1330  * Scheduling a retransmit timer too early would be silly.
1331  */
1332 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1333 {
1334 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1335 
1336 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1337 }
1338 
1339 static inline void tcp_reset_xmit_timer(struct sock *sk,
1340 					const int what,
1341 					unsigned long when,
1342 					const unsigned long max_when)
1343 {
1344 	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1345 				  max_when);
1346 }
1347 
1348 /* Something is really bad, we could not queue an additional packet,
1349  * because qdisc is full or receiver sent a 0 window, or we are paced.
1350  * We do not want to add fuel to the fire, or abort too early,
1351  * so make sure the timer we arm now is at least 200ms in the future,
1352  * regardless of current icsk_rto value (as it could be ~2ms)
1353  */
1354 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1355 {
1356 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1357 }
1358 
1359 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1360 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1361 					    unsigned long max_when)
1362 {
1363 	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1364 			   inet_csk(sk)->icsk_backoff);
1365 	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1366 
1367 	return (unsigned long)min_t(u64, when, max_when);
1368 }
1369 
1370 static inline void tcp_check_probe_timer(struct sock *sk)
1371 {
1372 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1373 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1374 				     tcp_probe0_base(sk), TCP_RTO_MAX);
1375 }
1376 
1377 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1378 {
1379 	tp->snd_wl1 = seq;
1380 }
1381 
1382 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1383 {
1384 	tp->snd_wl1 = seq;
1385 }
1386 
1387 /*
1388  * Calculate(/check) TCP checksum
1389  */
1390 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1391 				   __be32 daddr, __wsum base)
1392 {
1393 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1394 }
1395 
1396 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1397 {
1398 	return !skb_csum_unnecessary(skb) &&
1399 		__skb_checksum_complete(skb);
1400 }
1401 
1402 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1403 		     enum skb_drop_reason *reason);
1404 
1405 
1406 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1407 void tcp_set_state(struct sock *sk, int state);
1408 void tcp_done(struct sock *sk);
1409 int tcp_abort(struct sock *sk, int err);
1410 
1411 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1412 {
1413 	rx_opt->dsack = 0;
1414 	rx_opt->num_sacks = 0;
1415 }
1416 
1417 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1418 
1419 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1420 {
1421 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1422 	struct tcp_sock *tp = tcp_sk(sk);
1423 	s32 delta;
1424 
1425 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1426 	    tp->packets_out || ca_ops->cong_control)
1427 		return;
1428 	delta = tcp_jiffies32 - tp->lsndtime;
1429 	if (delta > inet_csk(sk)->icsk_rto)
1430 		tcp_cwnd_restart(sk, delta);
1431 }
1432 
1433 /* Determine a window scaling and initial window to offer. */
1434 void tcp_select_initial_window(const struct sock *sk, int __space,
1435 			       __u32 mss, __u32 *rcv_wnd,
1436 			       __u32 *window_clamp, int wscale_ok,
1437 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1438 
1439 static inline int tcp_win_from_space(const struct sock *sk, int space)
1440 {
1441 	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
1442 
1443 	return tcp_adv_win_scale <= 0 ?
1444 		(space>>(-tcp_adv_win_scale)) :
1445 		space - (space>>tcp_adv_win_scale);
1446 }
1447 
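/* Illustrative note (not part of the original source): with the common
 * tcp_adv_win_scale of 1, half of the buffer space is advertised as window
 * and half is reserved as an overhead estimate, e.g. a 1 MB rcvbuf yields
 * roughly a 512 KB window; a non-positive scale instead advertises
 * space >> |scale|.
 */
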
1448 /* Note: caller must be prepared to deal with negative returns */
1449 static inline int tcp_space(const struct sock *sk)
1450 {
1451 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1452 				  READ_ONCE(sk->sk_backlog.len) -
1453 				  atomic_read(&sk->sk_rmem_alloc));
1454 }
1455 
1456 static inline int tcp_full_space(const struct sock *sk)
1457 {
1458 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1459 }
1460 
1461 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1462 {
1463 	int unused_mem = sk_unused_reserved_mem(sk);
1464 	struct tcp_sock *tp = tcp_sk(sk);
1465 
1466 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
1467 	if (unused_mem)
1468 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1469 					 tcp_win_from_space(sk, unused_mem));
1470 }
1471 
1472 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1473 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1474 
1475 
1476 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1477  * If 87.5% (7/8) of the space has been consumed, we want to override
1478  * the SO_RCVLOWAT constraint, since we are receiving skbs with too small
1479  * a len/truesize ratio.
1480  */
1481 static inline bool tcp_rmem_pressure(const struct sock *sk)
1482 {
1483 	int rcvbuf, threshold;
1484 
1485 	if (tcp_under_memory_pressure(sk))
1486 		return true;
1487 
1488 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1489 	threshold = rcvbuf - (rcvbuf >> 3);
1490 
1491 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1492 }
1493 
1494 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1495 {
1496 	const struct tcp_sock *tp = tcp_sk(sk);
1497 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1498 
1499 	if (avail <= 0)
1500 		return false;
1501 
1502 	return (avail >= target) || tcp_rmem_pressure(sk) ||
1503 	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1504 }
1505 
1506 extern void tcp_openreq_init_rwin(struct request_sock *req,
1507 				  const struct sock *sk_listener,
1508 				  const struct dst_entry *dst);
1509 
1510 void tcp_enter_memory_pressure(struct sock *sk);
1511 void tcp_leave_memory_pressure(struct sock *sk);
1512 
1513 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1514 {
1515 	struct net *net = sock_net((struct sock *)tp);
1516 
1517 	return tp->keepalive_intvl ? :
1518 		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1519 }
1520 
1521 static inline int keepalive_time_when(const struct tcp_sock *tp)
1522 {
1523 	struct net *net = sock_net((struct sock *)tp);
1524 
1525 	return tp->keepalive_time ? :
1526 		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1527 }
1528 
1529 static inline int keepalive_probes(const struct tcp_sock *tp)
1530 {
1531 	struct net *net = sock_net((struct sock *)tp);
1532 
1533 	return tp->keepalive_probes ? :
1534 		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1535 }
1536 
1537 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1538 {
1539 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1540 
1541 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1542 			  tcp_jiffies32 - tp->rcv_tstamp);
1543 }
1544 
1545 static inline int tcp_fin_time(const struct sock *sk)
1546 {
1547 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1548 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1549 	const int rto = inet_csk(sk)->icsk_rto;
1550 
1551 	if (fin_timeout < (rto << 2) - (rto >> 1))
1552 		fin_timeout = (rto << 2) - (rto >> 1);
1553 
1554 	return fin_timeout;
1555 }
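
/* Editorial note: (rto << 2) - (rto >> 1) above equals 3.5 * RTO, so the
 * effective FIN_WAIT_2 timeout is never shorter than 3.5 retransmission
 * timeouts; e.g. with an RTO of 200 ms it is clamped to at least 700 ms.
 */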
1556 
1557 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1558 				  int paws_win)
1559 {
1560 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1561 		return true;
1562 	if (unlikely(!time_before32(ktime_get_seconds(),
1563 				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1564 		return true;
1565 	/*
1566 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, while
1567 	 * the following TCP messages carry valid values. Ignore a 0 value, or
1568 	 * else a 'negative' tsval might prevent us from accepting their packets.
1569 	 */
1570 	if (!rx_opt->ts_recent)
1571 		return true;
1572 	return false;
1573 }
1574 
1575 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1576 				   int rst)
1577 {
1578 	if (tcp_paws_check(rx_opt, 0))
1579 		return false;
1580 
1581 	/* RST segments are not recommended to carry a timestamp,
1582 	   and, if they do, it is recommended to ignore PAWS because
1583 	   "their cleanup function should take precedence over timestamps."
1584 	   Certainly, this is a mistake. It is necessary to understand the
1585 	   reasons for this constraint before relaxing it: if the peer reboots,
1586 	   its clock may go out of sync and half-open connections will not be
1587 	   reset. Actually, the problem would not exist if all implementations
1588 	   followed the draft about maintaining clocks across reboots.
1589 	   Linux-2.2 DOES NOT!
1590 
1591 	   However, we can relax the time bounds for RST segments to MSL.
1592 	 */
1593 	if (rst && !time_before32(ktime_get_seconds(),
1594 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1595 		return false;
1596 	return true;
1597 }
1598 
1599 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1600 			  int mib_idx, u32 *last_oow_ack_time);
1601 
1602 static inline void tcp_mib_init(struct net *net)
1603 {
1604 	/* See RFC 2012 */
1605 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1606 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1607 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1608 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1609 }
1610 
1611 /* from STCP */
1612 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1613 {
1614 	tp->lost_skb_hint = NULL;
1615 }
1616 
1617 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1618 {
1619 	tcp_clear_retrans_hints_partial(tp);
1620 	tp->retransmit_skb_hint = NULL;
1621 }
1622 
1623 union tcp_md5_addr {
1624 	struct in_addr  a4;
1625 #if IS_ENABLED(CONFIG_IPV6)
1626 	struct in6_addr	a6;
1627 #endif
1628 };
1629 
1630 /* - key database */
1631 struct tcp_md5sig_key {
1632 	struct hlist_node	node;
1633 	u8			keylen;
1634 	u8			family; /* AF_INET or AF_INET6 */
1635 	u8			prefixlen;
1636 	u8			flags;
1637 	union tcp_md5_addr	addr;
1638 	int			l3index; /* set if key added with L3 scope */
1639 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1640 	struct rcu_head		rcu;
1641 };
1642 
1643 /* - sock block */
1644 struct tcp_md5sig_info {
1645 	struct hlist_head	head;
1646 	struct rcu_head		rcu;
1647 };
1648 
1649 /* - pseudo header */
1650 struct tcp4_pseudohdr {
1651 	__be32		saddr;
1652 	__be32		daddr;
1653 	__u8		pad;
1654 	__u8		protocol;
1655 	__be16		len;
1656 };
1657 
1658 struct tcp6_pseudohdr {
1659 	struct in6_addr	saddr;
1660 	struct in6_addr daddr;
1661 	__be32		len;
1662 	__be32		protocol;	/* including padding */
1663 };
1664 
1665 union tcp_md5sum_block {
1666 	struct tcp4_pseudohdr ip4;
1667 #if IS_ENABLED(CONFIG_IPV6)
1668 	struct tcp6_pseudohdr ip6;
1669 #endif
1670 };
1671 
1672 /* - pool: digest algorithm, hash description and scratch buffer */
1673 struct tcp_md5sig_pool {
1674 	struct ahash_request	*md5_req;
1675 	void			*scratch;
1676 };
1677 
1678 /* - functions */
1679 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1680 			const struct sock *sk, const struct sk_buff *skb);
1681 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1682 		   int family, u8 prefixlen, int l3index, u8 flags,
1683 		   const u8 *newkey, u8 newkeylen);
1684 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1685 		     int family, u8 prefixlen, int l3index,
1686 		     struct tcp_md5sig_key *key);
1687 
1688 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1689 		   int family, u8 prefixlen, int l3index, u8 flags);
1690 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1691 					 const struct sock *addr_sk);
1692 
1693 #ifdef CONFIG_TCP_MD5SIG
1694 #include <linux/jump_label.h>
1695 extern struct static_key_false_deferred tcp_md5_needed;
1696 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1697 					   const union tcp_md5_addr *addr,
1698 					   int family);
1699 static inline struct tcp_md5sig_key *
1700 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1701 		  const union tcp_md5_addr *addr, int family)
1702 {
1703 	if (!static_branch_unlikely(&tcp_md5_needed.key))
1704 		return NULL;
1705 	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1706 }
1707 
1708 enum skb_drop_reason
1709 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1710 		     const void *saddr, const void *daddr,
1711 		     int family, int dif, int sdif);
1712 
1713 
1714 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1715 #else
1716 static inline struct tcp_md5sig_key *
1717 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1718 		  const union tcp_md5_addr *addr, int family)
1719 {
1720 	return NULL;
1721 }
1722 
1723 static inline enum skb_drop_reason
1724 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1725 		     const void *saddr, const void *daddr,
1726 		     int family, int dif, int sdif)
1727 {
1728 	return SKB_NOT_DROPPED_YET;
1729 }
1730 #define tcp_twsk_md5_key(twsk)	NULL
1731 #endif
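
/* Editorial example (hypothetical helper, not part of the original header):
 * a minimal sketch of how an IPv4 caller might look up a peer's MD5 key with
 * the helpers above.  l3index 0 assumes a key added without L3 (VRF) scope.
 * When CONFIG_TCP_MD5SIG is off, or while the tcp_md5_needed static key is
 * false, this resolves to NULL without walking any hash table.
 */
static inline struct tcp_md5sig_key *
tcp_md5_example_lookup_v4(const struct sock *sk, __be32 peer_addr)
{
	union tcp_md5_addr addr;

	addr.a4.s_addr = peer_addr;	/* only the IPv4 member is consulted for AF_INET */
	return tcp_md5_do_lookup(sk, 0, &addr, AF_INET);
}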
1732 
1733 bool tcp_alloc_md5sig_pool(void);
1734 
1735 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1736 static inline void tcp_put_md5sig_pool(void)
1737 {
1738 	local_bh_enable();
1739 }
1740 
1741 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1742 			  unsigned int header_len);
1743 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1744 		     const struct tcp_md5sig_key *key);
1745 
1746 /* From tcp_fastopen.c */
1747 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1748 			    struct tcp_fastopen_cookie *cookie);
1749 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1750 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1751 			    u16 try_exp);
1752 struct tcp_fastopen_request {
1753 	/* Fast Open cookie. Size 0 means a cookie request */
1754 	struct tcp_fastopen_cookie	cookie;
1755 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1756 	size_t				size;
1757 	int				copied;	/* queued in tcp_connect() */
1758 	struct ubuf_info		*uarg;
1759 };
1760 void tcp_free_fastopen_req(struct tcp_sock *tp);
1761 void tcp_fastopen_destroy_cipher(struct sock *sk);
1762 void tcp_fastopen_ctx_destroy(struct net *net);
1763 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1764 			      void *primary_key, void *backup_key);
1765 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1766 			    u64 *key);
1767 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1768 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1769 			      struct request_sock *req,
1770 			      struct tcp_fastopen_cookie *foc,
1771 			      const struct dst_entry *dst);
1772 void tcp_fastopen_init_key_once(struct net *net);
1773 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1774 			     struct tcp_fastopen_cookie *cookie);
1775 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1776 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1777 #define TCP_FASTOPEN_KEY_MAX 2
1778 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
1779 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1780 
1781 /* Fastopen key context */
1782 struct tcp_fastopen_context {
1783 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1784 	int		num;
1785 	struct rcu_head	rcu;
1786 };
1787 
1788 void tcp_fastopen_active_disable(struct sock *sk);
1789 bool tcp_fastopen_active_should_disable(struct sock *sk);
1790 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1791 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1792 
1793 /* Caller needs to wrap with rcu_read_(un)lock() */
1794 static inline
1795 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1796 {
1797 	struct tcp_fastopen_context *ctx;
1798 
1799 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1800 	if (!ctx)
1801 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1802 	return ctx;
1803 }
1804 
1805 static inline
1806 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1807 			       const struct tcp_fastopen_cookie *orig)
1808 {
1809 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1810 	    orig->len == foc->len &&
1811 	    !memcmp(orig->val, foc->val, foc->len))
1812 		return true;
1813 	return false;
1814 }
1815 
1816 static inline
1817 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1818 {
1819 	return ctx->num;
1820 }
1821 
1822 /* Latencies incurred by various limits for a sender. They are
1823  * chronograph-like stats that are mutually exclusive.
1824  */
1825 enum tcp_chrono {
1826 	TCP_CHRONO_UNSPEC,
1827 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1828 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1829 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1830 	__TCP_CHRONO_MAX,
1831 };
1832 
1833 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1834 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
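
/* Editorial example (hypothetical helper, not in the original file): how a
 * sender path could account a send-buffer-limited stall with the chrono API
 * declared above.  The stats are mutually exclusive, so starting a new type
 * implicitly describes the current bottleneck.
 */
static inline void tcp_example_account_sndbuf_limit(struct sock *sk, bool limited)
{
	if (limited)
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}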
1835 
1836 /* This helper is needed because skb->tcp_tsorted_anchor uses
1837  * the same memory storage as skb->destructor/_skb_refdst
1838  */
1839 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1840 {
1841 	skb->destructor = NULL;
1842 	skb->_skb_refdst = 0UL;
1843 }
1844 
1845 #define tcp_skb_tsorted_save(skb) {		\
1846 	unsigned long _save = skb->_skb_refdst;	\
1847 	skb->_skb_refdst = 0UL;
1848 
1849 #define tcp_skb_tsorted_restore(skb)		\
1850 	skb->_skb_refdst = _save;		\
1851 }
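
/* Editorial example (hypothetical helper, not in the original file): the
 * save/restore macros above open and close a block, so callers bracket the
 * code that must run with skb->_skb_refdst temporarily cleared:
 */
static inline void tcp_example_tsorted_bracket(struct sk_buff *skb)
{
	tcp_skb_tsorted_save(skb) {
		/* code that reuses the tcp_tsorted_anchor storage runs here */
	} tcp_skb_tsorted_restore(skb);
}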
1852 
1853 void tcp_write_queue_purge(struct sock *sk);
1854 
1855 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1856 {
1857 	return skb_rb_first(&sk->tcp_rtx_queue);
1858 }
1859 
1860 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1861 {
1862 	return skb_rb_last(&sk->tcp_rtx_queue);
1863 }
1864 
1865 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1866 {
1867 	return skb_peek_tail(&sk->sk_write_queue);
1868 }
1869 
1870 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1871 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1872 
1873 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1874 {
1875 	return skb_peek(&sk->sk_write_queue);
1876 }
1877 
1878 static inline bool tcp_skb_is_last(const struct sock *sk,
1879 				   const struct sk_buff *skb)
1880 {
1881 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1882 }
1883 
1884 /**
1885  * tcp_write_queue_empty - test if no payload (or FIN) is waiting in the write queue
1886  * @sk: socket
1887  *
1888  * Since the write queue can contain a temporarily empty skb,
1889  * we must not simply "return skb_queue_empty(&sk->sk_write_queue)"
1890  */
1891 static inline bool tcp_write_queue_empty(const struct sock *sk)
1892 {
1893 	const struct tcp_sock *tp = tcp_sk(sk);
1894 
1895 	return tp->write_seq == tp->snd_nxt;
1896 }
1897 
1898 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1899 {
1900 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1901 }
1902 
1903 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1904 {
1905 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1906 }
1907 
1908 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1909 {
1910 	__skb_queue_tail(&sk->sk_write_queue, skb);
1911 
1912 	/* Queue it, remembering where we must start sending. */
1913 	if (sk->sk_write_queue.next == skb)
1914 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1915 }
1916 
1917 /* Insert new before skb on the write queue of sk.  */
1918 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1919 						  struct sk_buff *skb,
1920 						  struct sock *sk)
1921 {
1922 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1923 }
1924 
1925 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1926 {
1927 	tcp_skb_tsorted_anchor_cleanup(skb);
1928 	__skb_unlink(skb, &sk->sk_write_queue);
1929 }
1930 
1931 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1932 
1933 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1934 {
1935 	tcp_skb_tsorted_anchor_cleanup(skb);
1936 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1937 }
1938 
1939 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1940 {
1941 	list_del(&skb->tcp_tsorted_anchor);
1942 	tcp_rtx_queue_unlink(skb, sk);
1943 	tcp_wmem_free_skb(sk, skb);
1944 }
1945 
1946 static inline void tcp_push_pending_frames(struct sock *sk)
1947 {
1948 	if (tcp_send_head(sk)) {
1949 		struct tcp_sock *tp = tcp_sk(sk);
1950 
1951 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1952 	}
1953 }
1954 
1955 /* Start sequence of the skb just after the highest skb with SACKed
1956  * bit, valid only if sacked_out > 0 or when the caller has itself
1957  * ensured validity.
1958  */
1959 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1960 {
1961 	if (!tp->sacked_out)
1962 		return tp->snd_una;
1963 
1964 	if (tp->highest_sack == NULL)
1965 		return tp->snd_nxt;
1966 
1967 	return TCP_SKB_CB(tp->highest_sack)->seq;
1968 }
1969 
1970 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1971 {
1972 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1973 }
1974 
1975 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1976 {
1977 	return tcp_sk(sk)->highest_sack;
1978 }
1979 
1980 static inline void tcp_highest_sack_reset(struct sock *sk)
1981 {
1982 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1983 }
1984 
1985 /* Called when old skb is about to be deleted and replaced by new skb */
1986 static inline void tcp_highest_sack_replace(struct sock *sk,
1987 					    struct sk_buff *old,
1988 					    struct sk_buff *new)
1989 {
1990 	if (old == tcp_highest_sack(sk))
1991 		tcp_sk(sk)->highest_sack = new;
1992 }
1993 
1994 /* This helper checks if socket has IP_TRANSPARENT set */
1995 static inline bool inet_sk_transparent(const struct sock *sk)
1996 {
1997 	switch (sk->sk_state) {
1998 	case TCP_TIME_WAIT:
1999 		return inet_twsk(sk)->tw_transparent;
2000 	case TCP_NEW_SYN_RECV:
2001 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2002 	}
2003 	return inet_sk(sk)->transparent;
2004 }
2005 
2006 /* Determines whether this is a thin stream (which may suffer from
2007  * increased latency). Used to trigger latency-reducing mechanisms.
2008  */
2009 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2010 {
2011 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2012 }
2013 
2014 /* /proc */
2015 enum tcp_seq_states {
2016 	TCP_SEQ_STATE_LISTENING,
2017 	TCP_SEQ_STATE_ESTABLISHED,
2018 };
2019 
2020 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2021 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2022 void tcp_seq_stop(struct seq_file *seq, void *v);
2023 
2024 struct tcp_seq_afinfo {
2025 	sa_family_t			family;
2026 };
2027 
2028 struct tcp_iter_state {
2029 	struct seq_net_private	p;
2030 	enum tcp_seq_states	state;
2031 	struct sock		*syn_wait_sk;
2032 	int			bucket, offset, sbucket, num;
2033 	loff_t			last_pos;
2034 };
2035 
2036 extern struct request_sock_ops tcp_request_sock_ops;
2037 extern struct request_sock_ops tcp6_request_sock_ops;
2038 
2039 void tcp_v4_destroy_sock(struct sock *sk);
2040 
2041 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2042 				netdev_features_t features);
2043 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
2044 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2045 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2046 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2047 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2048 int tcp_gro_complete(struct sk_buff *skb);
2049 
2050 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2051 
2052 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2053 {
2054 	struct net *net = sock_net((struct sock *)tp);
2055 	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2056 }
2057 
2058 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2059 
2060 #ifdef CONFIG_PROC_FS
2061 int tcp4_proc_init(void);
2062 void tcp4_proc_exit(void);
2063 #endif
2064 
2065 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2066 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2067 		     const struct tcp_request_sock_ops *af_ops,
2068 		     struct sock *sk, struct sk_buff *skb);
2069 
2070 /* TCP af-specific functions */
2071 struct tcp_sock_af_ops {
2072 #ifdef CONFIG_TCP_MD5SIG
2073 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2074 						const struct sock *addr_sk);
2075 	int		(*calc_md5_hash)(char *location,
2076 					 const struct tcp_md5sig_key *md5,
2077 					 const struct sock *sk,
2078 					 const struct sk_buff *skb);
2079 	int		(*md5_parse)(struct sock *sk,
2080 				     int optname,
2081 				     sockptr_t optval,
2082 				     int optlen);
2083 #endif
2084 };
2085 
2086 struct tcp_request_sock_ops {
2087 	u16 mss_clamp;
2088 #ifdef CONFIG_TCP_MD5SIG
2089 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2090 						 const struct sock *addr_sk);
2091 	int		(*calc_md5_hash) (char *location,
2092 					  const struct tcp_md5sig_key *md5,
2093 					  const struct sock *sk,
2094 					  const struct sk_buff *skb);
2095 #endif
2096 #ifdef CONFIG_SYN_COOKIES
2097 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2098 				 __u16 *mss);
2099 #endif
2100 	struct dst_entry *(*route_req)(const struct sock *sk,
2101 				       struct sk_buff *skb,
2102 				       struct flowi *fl,
2103 				       struct request_sock *req);
2104 	u32 (*init_seq)(const struct sk_buff *skb);
2105 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2106 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2107 			   struct flowi *fl, struct request_sock *req,
2108 			   struct tcp_fastopen_cookie *foc,
2109 			   enum tcp_synack_type synack_type,
2110 			   struct sk_buff *syn_skb);
2111 };
2112 
2113 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2114 #if IS_ENABLED(CONFIG_IPV6)
2115 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2116 #endif
2117 
2118 #ifdef CONFIG_SYN_COOKIES
2119 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2120 					 const struct sock *sk, struct sk_buff *skb,
2121 					 __u16 *mss)
2122 {
2123 	tcp_synq_overflow(sk);
2124 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2125 	return ops->cookie_init_seq(skb, mss);
2126 }
2127 #else
2128 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2129 					 const struct sock *sk, struct sk_buff *skb,
2130 					 __u16 *mss)
2131 {
2132 	return 0;
2133 }
2134 #endif
2135 
2136 int tcpv4_offload_init(void);
2137 
2138 void tcp_v4_init(void);
2139 void tcp_init(void);
2140 
2141 /* tcp_recovery.c */
2142 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2143 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2144 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2145 				u32 reo_wnd);
2146 extern bool tcp_rack_mark_lost(struct sock *sk);
2147 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2148 			     u64 xmit_time);
2149 extern void tcp_rack_reo_timeout(struct sock *sk);
2150 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2151 
2152 /* tcp_plb.c */
2153 
2154 /*
2155  * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2156  * expects cong_ratio, which represents the fraction of traffic that
2157  * experienced congestion over a single RTT. To avoid floating point operations,
2158  * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2159  */
2160 #define TCP_PLB_SCALE 8
2161 
2162 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2163 struct tcp_plb_state {
2164 	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2165 		unused:3;
2166 	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2167 };
2168 
2169 static inline void tcp_plb_init(const struct sock *sk,
2170 				struct tcp_plb_state *plb)
2171 {
2172 	plb->consec_cong_rounds = 0;
2173 	plb->pause_until = 0;
2174 }
2175 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2176 			  const int cong_ratio);
2177 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2178 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
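
/* Editorial example (hypothetical helper, not in the original file): building
 * the fixed-point cong_ratio expected by tcp_plb_update_state() from packet
 * counts.  With 25 of 100 packets congestion-marked this returns 64, i.e.
 * 0.25 * (1 << TCP_PLB_SCALE).  Counts are assumed small enough not to
 * overflow the shift.
 */
static inline u32 tcp_example_plb_cong_ratio(u32 marked, u32 delivered)
{
	if (!delivered)
		return 0;
	return (marked << TCP_PLB_SCALE) / delivered;
}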
2179 
2180 /* At how many usecs into the future should the RTO fire? */
2181 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2182 {
2183 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2184 	u32 rto = inet_csk(sk)->icsk_rto;
2185 	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2186 
2187 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2188 }
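
/* Editorial note: the value above is (head skb xmit time + RTO) - now, so a
 * negative result means the retransmission timer is already overdue.
 */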
2189 
2190 /*
2191  * Save and compile IPv4 options, return a pointer to them
2192  */
2193 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2194 							 struct sk_buff *skb)
2195 {
2196 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2197 	struct ip_options_rcu *dopt = NULL;
2198 
2199 	if (opt->optlen) {
2200 		int opt_size = sizeof(*dopt) + opt->optlen;
2201 
2202 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2203 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2204 			kfree(dopt);
2205 			dopt = NULL;
2206 		}
2207 	}
2208 	return dopt;
2209 }
2210 
2211 /* Locally generated TCP pure ACKs have skb->truesize == 2
2212  * (see tcp_send_ack() in net/ipv4/tcp_output.c).
2213  * Checking truesize is much faster than dissecting the packet to find out
2214  * whether it carries payload (think of GRE encapsulations, IPv4, IPv6, ...).
2215  */
2216 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2217 {
2218 	return skb->truesize == 2;
2219 }
2220 
2221 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2222 {
2223 	skb->truesize = 2;
2224 }
2225 
2226 static inline int tcp_inq(struct sock *sk)
2227 {
2228 	struct tcp_sock *tp = tcp_sk(sk);
2229 	int answ;
2230 
2231 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2232 		answ = 0;
2233 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2234 		   !tp->urg_data ||
2235 		   before(tp->urg_seq, tp->copied_seq) ||
2236 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2237 
2238 		answ = tp->rcv_nxt - tp->copied_seq;
2239 
2240 		/* Subtract 1, if FIN was received */
2241 		if (answ && sock_flag(sk, SOCK_DONE))
2242 			answ--;
2243 	} else {
2244 		answ = tp->urg_seq - tp->copied_seq;
2245 	}
2246 
2247 	return answ;
2248 }
2249 
2250 int tcp_peek_len(struct socket *sock);
2251 
2252 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2253 {
2254 	u16 segs_in;
2255 
2256 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2257 
2258 	/* We update these fields while other threads might
2259 	 * read them from tcp_get_info()
2260 	 */
2261 	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2262 	if (skb->len > tcp_hdrlen(skb))
2263 		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2264 }
2265 
2266 /*
2267  * The TCP listen path runs lockless.
2268  * We forced "struct sock" to be const qualified to make sure
2269  * we don't modify one of its fields by mistake.
2270  * Here, we increment sk_drops, which is an atomic_t, so we can safely
2271  * cast the const away and make the sock writable again.
2272  */
2273 static inline void tcp_listendrop(const struct sock *sk)
2274 {
2275 	atomic_inc(&((struct sock *)sk)->sk_drops);
2276 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2277 }
2278 
2279 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2280 
2281 /*
2282  * Interface for adding Upper Level Protocols over TCP
2283  */
2284 
2285 #define TCP_ULP_NAME_MAX	16
2286 #define TCP_ULP_MAX		128
2287 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2288 
2289 struct tcp_ulp_ops {
2290 	struct list_head	list;
2291 
2292 	/* initialize ulp */
2293 	int (*init)(struct sock *sk);
2294 	/* update ulp */
2295 	void (*update)(struct sock *sk, struct proto *p,
2296 		       void (*write_space)(struct sock *sk));
2297 	/* cleanup ulp */
2298 	void (*release)(struct sock *sk);
2299 	/* diagnostic */
2300 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2301 	size_t (*get_info_size)(const struct sock *sk);
2302 	/* clone ulp */
2303 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2304 		      const gfp_t priority);
2305 
2306 	char		name[TCP_ULP_NAME_MAX];
2307 	struct module	*owner;
2308 };
2309 int tcp_register_ulp(struct tcp_ulp_ops *type);
2310 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2311 int tcp_set_ulp(struct sock *sk, const char *name);
2312 void tcp_get_available_ulp(char *buf, size_t len);
2313 void tcp_cleanup_ulp(struct sock *sk);
2314 void tcp_update_ulp(struct sock *sk, struct proto *p,
2315 		    void (*write_space)(struct sock *sk));
2316 
2317 #define MODULE_ALIAS_TCP_ULP(name)				\
2318 	__MODULE_INFO(alias, alias_userspace, name);		\
2319 	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
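
/* Editorial sketch (hypothetical ULP, not part of the original header): a ULP
 * fills in a struct tcp_ulp_ops and becomes selectable by name through the
 * TCP_ULP socket option, roughly:
 *
 *	static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.init	= example_ulp_init,	// takes over the socket's proto ops
 *	};
 *
 *	// module init:	tcp_register_ulp(&example_ulp_ops);
 *	// userspace:	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "example", sizeof("example"));
 */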
2320 
2321 #ifdef CONFIG_NET_SOCK_MSG
2322 struct sk_msg;
2323 struct sk_psock;
2324 
2325 #ifdef CONFIG_BPF_SYSCALL
2326 struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2327 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2328 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2329 #endif /* CONFIG_BPF_SYSCALL */
2330 
2331 #ifdef CONFIG_INET
2332 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2333 #else
2334 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2335 {
2336 }
2337 #endif
2338 
2339 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2340 			  struct sk_msg *msg, u32 bytes, int flags);
2341 #endif /* CONFIG_NET_SOCK_MSG */
2342 
2343 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2344 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2345 {
2346 }
2347 #endif
2348 
2349 #ifdef CONFIG_CGROUP_BPF
2350 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2351 				      struct sk_buff *skb,
2352 				      unsigned int end_offset)
2353 {
2354 	skops->skb = skb;
2355 	skops->skb_data_end = skb->data + end_offset;
2356 }
2357 #else
2358 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2359 				      struct sk_buff *skb,
2360 				      unsigned int end_offset)
2361 {
2362 }
2363 #endif
2364 
2365 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2366  * is < 0, then the BPF op failed (for example if the loaded BPF
2367  * program does not support the chosen operation or there is no BPF
2368  * program loaded).
2369  */
2370 #ifdef CONFIG_BPF
2371 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2372 {
2373 	struct bpf_sock_ops_kern sock_ops;
2374 	int ret;
2375 
2376 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2377 	if (sk_fullsock(sk)) {
2378 		sock_ops.is_fullsock = 1;
2379 		sock_owned_by_me(sk);
2380 	}
2381 
2382 	sock_ops.sk = sk;
2383 	sock_ops.op = op;
2384 	if (nargs > 0)
2385 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2386 
2387 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2388 	if (ret == 0)
2389 		ret = sock_ops.reply;
2390 	else
2391 		ret = -1;
2392 	return ret;
2393 }
2394 
2395 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2396 {
2397 	u32 args[2] = {arg1, arg2};
2398 
2399 	return tcp_call_bpf(sk, op, 2, args);
2400 }
2401 
2402 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2403 				    u32 arg3)
2404 {
2405 	u32 args[3] = {arg1, arg2, arg3};
2406 
2407 	return tcp_call_bpf(sk, op, 3, args);
2408 }
2409 
2410 #else
2411 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2412 {
2413 	return -EPERM;
2414 }
2415 
2416 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2417 {
2418 	return -EPERM;
2419 }
2420 
2421 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2422 				    u32 arg3)
2423 {
2424 	return -EPERM;
2425 }
2426 
2427 #endif
2428 
2429 static inline u32 tcp_timeout_init(struct sock *sk)
2430 {
2431 	int timeout;
2432 
2433 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2434 
2435 	if (timeout <= 0)
2436 		timeout = TCP_TIMEOUT_INIT;
2437 	return min_t(int, timeout, TCP_RTO_MAX);
2438 }
2439 
2440 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2441 {
2442 	int rwnd;
2443 
2444 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2445 
2446 	if (rwnd < 0)
2447 		rwnd = 0;
2448 	return rwnd;
2449 }
2450 
2451 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2452 {
2453 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2454 }
2455 
2456 static inline void tcp_bpf_rtt(struct sock *sk)
2457 {
2458 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2459 		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2460 }
2461 
2462 #if IS_ENABLED(CONFIG_SMC)
2463 extern struct static_key_false tcp_have_smc;
2464 #endif
2465 
2466 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2467 void clean_acked_data_enable(struct inet_connection_sock *icsk,
2468 			     void (*cad)(struct sock *sk, u32 ack_seq));
2469 void clean_acked_data_disable(struct inet_connection_sock *icsk);
2470 void clean_acked_data_flush(void);
2471 #endif
2472 
2473 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2474 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2475 				    const struct tcp_sock *tp)
2476 {
2477 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2478 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2479 }
2480 
2481 /* Compute Earliest Departure Time for some control packets
2482  * like ACKs or RSTs for TIME_WAIT or non-ESTABLISHED sockets.
2483  */
2484 static inline u64 tcp_transmit_time(const struct sock *sk)
2485 {
2486 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2487 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2488 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2489 
2490 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2491 	}
2492 	return 0;
2493 }
2494 
2495 #endif	/* _TCP_H */
2496