xref: /openbmc/linux/include/net/tcp.h (revision 2d594783)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the TCP module.
8  *
9  * Version:	@(#)tcp.h	1.0.5	05/23/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  */
14 #ifndef _TCP_H
15 #define _TCP_H
16 
17 #define FASTRETRANS_DEBUG 1
18 
19 #include <linux/list.h>
20 #include <linux/tcp.h>
21 #include <linux/bug.h>
22 #include <linux/slab.h>
23 #include <linux/cache.h>
24 #include <linux/percpu.h>
25 #include <linux/skbuff.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/indirect_call_wrapper.h>
29 
30 #include <net/inet_connection_sock.h>
31 #include <net/inet_timewait_sock.h>
32 #include <net/inet_hashtables.h>
33 #include <net/checksum.h>
34 #include <net/request_sock.h>
35 #include <net/sock_reuseport.h>
36 #include <net/sock.h>
37 #include <net/snmp.h>
38 #include <net/ip.h>
39 #include <net/tcp_states.h>
40 #include <net/inet_ecn.h>
41 #include <net/dst.h>
42 #include <net/mptcp.h>
43 
44 #include <linux/seq_file.h>
45 #include <linux/memcontrol.h>
46 #include <linux/bpf-cgroup.h>
47 #include <linux/siphash.h>
48 #include <linux/net_mm.h>
49 
50 extern struct inet_hashinfo tcp_hashinfo;
51 
52 DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
53 int tcp_orphan_count_sum(void);
54 
55 void tcp_time_wait(struct sock *sk, int state, int timeo);
56 
57 #define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
58 #define MAX_TCP_OPTION_SPACE 40
59 #define TCP_MIN_SND_MSS		48
60 #define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
61 
62 /*
63  * Never offer a window over 32767 without using window scaling. Some
64  * poor stacks do signed 16bit maths!
65  */
66 #define MAX_TCP_WINDOW		32767U
67 
68 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
69 #define TCP_MIN_MSS		88U
70 
71 /* The initial MTU to use for probing */
72 #define TCP_BASE_MSS		1024
73 
74 /* probing interval, defaults to 10 minutes as per RFC4821 */
75 #define TCP_PROBE_INTERVAL	600
76 
77 /* MTU search range (bytes) below which tcp mtu probing will stop */
78 #define TCP_PROBE_THRESHOLD	8
79 
80 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
81 #define TCP_FASTRETRANS_THRESH 3
82 
83 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
84 #define TCP_MAX_QUICKACKS	16U
85 
86 /* Maximal window scale value according to RFC1323 */
87 #define TCP_MAX_WSCALE		14U
88 
89 /* urg_data states */
90 #define TCP_URG_VALID	0x0100
91 #define TCP_URG_NOTYET	0x0200
92 #define TCP_URG_READ	0x0400
93 
94 #define TCP_RETR1	3	/*
95 				 * This is how many retries it does before it
96 				 * tries to figure out if the gateway is
97 				 * down. Minimal RFC value is 3; it corresponds
98 				 * to ~3sec-8min depending on RTO.
99 				 */
100 
101 #define TCP_RETR2	15	/*
102 				 * This should take at least
103 				 * 90 minutes to time out.
104 				 * RFC1122 says that the limit is 100 sec.
105 				 * 15 is ~13-30min depending on RTO.
106 				 */
107 
108 #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
109 				 * when active opening a connection.
110 				 * RFC1122 says the minimum retry MUST
111 				 * be at least 180secs.  Nevertheless
112 				 * this value corresponds to
113 				 * 63secs of retransmission with the
114 				 * current initial RTO.
115 				 */
116 
117 #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
118 				 * when passive opening a connection.
119 				 * This corresponds to 31secs of
120 				 * retransmission with the current
121 				 * initial RTO.
122 				 */
123 
124 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
125 				  * state, about 60 seconds	*/
126 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
127                                  /* BSD style FIN_WAIT2 deadlock breaker.
128 				  * It used to be 3min, new value is 60sec,
129 				  * to combine FIN-WAIT-2 timeout with
130 				  * TIME-WAIT timer.
131 				  */
132 #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
133 
134 #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
135 #if HZ >= 100
136 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
137 #define TCP_ATO_MIN	((unsigned)(HZ/25))
138 #else
139 #define TCP_DELACK_MIN	4U
140 #define TCP_ATO_MIN	4U
141 #endif
142 #define TCP_RTO_MAX	((unsigned)(120*HZ))
143 #define TCP_RTO_MIN	((unsigned)(HZ/5))
144 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
145 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
146 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
147 						 * used as a fallback RTO for the
148 						 * initial data transmission if no
149 						 * valid RTT sample has been acquired,
150 						 * most likely due to retrans in 3WHS.
151 						 */
152 
153 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
154 					                 * for local resources.
155 					                 */
156 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
157 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
158 #define TCP_KEEPALIVE_INTVL	(75*HZ)
159 
160 #define MAX_TCP_KEEPIDLE	32767
161 #define MAX_TCP_KEEPINTVL	32767
162 #define MAX_TCP_KEEPCNT		127
163 #define MAX_TCP_SYNCNT		127
164 
165 #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
166 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
167 					 * after this time. It should be equal
168 					 * to (or greater than) TCP_TIMEWAIT_LEN
169 					 * to provide reliability equal to that
170 					 * provided by the timewait state.
171 					 */
172 #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
173 					 * timestamps. It must be less than
174 					 * minimal timewait lifetime.
175 					 */
176 /*
177  *	TCP option
178  */
179 
180 #define TCPOPT_NOP		1	/* Padding */
181 #define TCPOPT_EOL		0	/* End of options */
182 #define TCPOPT_MSS		2	/* Segment size negotiating */
183 #define TCPOPT_WINDOW		3	/* Window scaling */
184 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
185 #define TCPOPT_SACK             5       /* SACK Block */
186 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
187 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
188 #define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
189 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
190 #define TCPOPT_EXP		254	/* Experimental */
191 /* Magic numbers placed after the option kind/length when sharing the TCP
192  * experimental option space. See draft-ietf-tcpm-experimental-options-00.txt
193  */
194 #define TCPOPT_FASTOPEN_MAGIC	0xF989
195 #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
196 
197 /*
198  *     TCP option lengths
199  */
200 
201 #define TCPOLEN_MSS            4
202 #define TCPOLEN_WINDOW         3
203 #define TCPOLEN_SACK_PERM      2
204 #define TCPOLEN_TIMESTAMP      10
205 #define TCPOLEN_MD5SIG         18
206 #define TCPOLEN_FASTOPEN_BASE  2
207 #define TCPOLEN_EXP_FASTOPEN_BASE  4
208 #define TCPOLEN_EXP_SMC_BASE   6
209 
210 /* But this is what stacks really send out. */
211 #define TCPOLEN_TSTAMP_ALIGNED		12
212 #define TCPOLEN_WSCALE_ALIGNED		4
213 #define TCPOLEN_SACKPERM_ALIGNED	4
214 #define TCPOLEN_SACK_BASE		2
215 #define TCPOLEN_SACK_BASE_ALIGNED	4
216 #define TCPOLEN_SACK_PERBLOCK		8
217 #define TCPOLEN_MD5SIG_ALIGNED		20
218 #define TCPOLEN_MSS_ALIGNED		4
219 #define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
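/* Example (informational, not part of the definitions above): the aligned
 * timestamp option emitted on established connections occupies the 12 bytes
 * of TCPOLEN_TSTAMP_ALIGNED, i.e. two NOPs of padding followed by the
 * 10-byte option itself:
 *
 *   01 01 08 0a <4-byte TSval> <4-byte TSecr>
 *   NOP NOP kind=8 len=10
 */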
220 
221 /* Flags in tp->nonagle */
222 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
223 #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
224 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
225 
226 /* TCP thin-stream limits */
227 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
228 
229 /* TCP initial congestion window as per rfc6928 */
230 #define TCP_INIT_CWND		10
231 
232 /* Bit Flags for sysctl_tcp_fastopen */
233 #define	TFO_CLIENT_ENABLE	1
234 #define	TFO_SERVER_ENABLE	2
235 #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
236 
237 /* Accept SYN data w/o any cookie option */
238 #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
239 
240 /* Force enable TFO on all listeners, i.e., not requiring the
241  * TCP_FASTOPEN socket option.
242  */
243 #define	TFO_SERVER_WO_SOCKOPT1	0x400
244 
245 
246 /* sysctl variables for tcp */
247 extern int sysctl_tcp_max_orphans;
248 extern long sysctl_tcp_mem[3];
249 
250 #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
251 #define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
252 #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
253 
254 extern atomic_long_t tcp_memory_allocated;
255 DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
256 
257 extern struct percpu_counter tcp_sockets_allocated;
258 extern unsigned long tcp_memory_pressure;
259 
260 /* optimized version of sk_under_memory_pressure() for TCP sockets */
261 static inline bool tcp_under_memory_pressure(const struct sock *sk)
262 {
263 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
264 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
265 		return true;
266 
267 	return READ_ONCE(tcp_memory_pressure);
268 }
269 /*
270  * The next routines deal with comparing 32 bit unsigned ints
271  * and worry about wraparound (automatic with unsigned arithmetic).
272  */
273 
274 static inline bool before(__u32 seq1, __u32 seq2)
275 {
276         return (__s32)(seq1-seq2) < 0;
277 }
278 #define after(seq2, seq1) 	before(seq1, seq2)
279 
280 /* is s2<=s1<=s3 ? */
281 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
282 {
283 	return seq3 - seq2 >= seq1 - seq2;
284 }
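/* Example of why the modular comparisons above matter: with
 * seq1 = 0xFFFFFFF0 and seq2 = 0x00000010, the plain test "seq1 < seq2"
 * is false, but (__s32)(seq1 - seq2) == -32, so before(seq1, seq2)
 * correctly reports that seq1 precedes seq2 across the 32-bit wrap.
 */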
285 
286 static inline bool tcp_out_of_memory(struct sock *sk)
287 {
288 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
289 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
290 		return true;
291 	return false;
292 }
293 
294 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
295 {
296 	sk_wmem_queued_add(sk, -skb->truesize);
297 	if (!skb_zcopy_pure(skb))
298 		sk_mem_uncharge(sk, skb->truesize);
299 	else
300 		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
301 	__kfree_skb(skb);
302 }
303 
304 void sk_forced_mem_schedule(struct sock *sk, int size);
305 
306 bool tcp_check_oom(struct sock *sk, int shift);
307 
308 
309 extern struct proto tcp_prot;
310 
311 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
312 #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
313 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
314 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
315 
316 void tcp_tasklet_init(void);
317 
318 int tcp_v4_err(struct sk_buff *skb, u32);
319 
320 void tcp_shutdown(struct sock *sk, int how);
321 
322 int tcp_v4_early_demux(struct sk_buff *skb);
323 int tcp_v4_rcv(struct sk_buff *skb);
324 
325 void tcp_remove_empty_skb(struct sock *sk);
326 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
327 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
328 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
329 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
330 			 size_t size, struct ubuf_info *uarg);
331 void tcp_splice_eof(struct socket *sock);
332 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
333 int tcp_wmem_schedule(struct sock *sk, int copy);
334 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
335 	      int size_goal);
336 void tcp_release_cb(struct sock *sk);
337 void tcp_wfree(struct sk_buff *skb);
338 void tcp_write_timer_handler(struct sock *sk);
339 void tcp_delack_timer_handler(struct sock *sk);
340 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
341 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
342 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
343 void tcp_rcv_space_adjust(struct sock *sk);
344 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
345 void tcp_twsk_destructor(struct sock *sk);
346 void tcp_twsk_purge(struct list_head *net_exit_list, int family);
347 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
348 			struct pipe_inode_info *pipe, size_t len,
349 			unsigned int flags);
350 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
351 				     bool force_schedule);
352 
353 static inline void tcp_dec_quickack_mode(struct sock *sk,
354 					 const unsigned int pkts)
355 {
356 	struct inet_connection_sock *icsk = inet_csk(sk);
357 
358 	if (icsk->icsk_ack.quick) {
359 		if (pkts >= icsk->icsk_ack.quick) {
360 			icsk->icsk_ack.quick = 0;
361 			/* Leaving quickack mode we deflate ATO. */
362 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
363 		} else
364 			icsk->icsk_ack.quick -= pkts;
365 	}
366 }
367 
368 #define	TCP_ECN_OK		1
369 #define	TCP_ECN_QUEUE_CWR	2
370 #define	TCP_ECN_DEMAND_CWR	4
371 #define	TCP_ECN_SEEN		8
372 
373 enum tcp_tw_status {
374 	TCP_TW_SUCCESS = 0,
375 	TCP_TW_RST = 1,
376 	TCP_TW_ACK = 2,
377 	TCP_TW_SYN = 3
378 };
379 
380 
381 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
382 					      struct sk_buff *skb,
383 					      const struct tcphdr *th);
384 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
385 			   struct request_sock *req, bool fastopen,
386 			   bool *lost_race);
387 int tcp_child_process(struct sock *parent, struct sock *child,
388 		      struct sk_buff *skb);
389 void tcp_enter_loss(struct sock *sk);
390 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
391 void tcp_clear_retrans(struct tcp_sock *tp);
392 void tcp_update_metrics(struct sock *sk);
393 void tcp_init_metrics(struct sock *sk);
394 void tcp_metrics_init(void);
395 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
396 void __tcp_close(struct sock *sk, long timeout);
397 void tcp_close(struct sock *sk, long timeout);
398 void tcp_init_sock(struct sock *sk);
399 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
400 __poll_t tcp_poll(struct file *file, struct socket *sock,
401 		      struct poll_table_struct *wait);
402 int do_tcp_getsockopt(struct sock *sk, int level,
403 		      int optname, sockptr_t optval, sockptr_t optlen);
404 int tcp_getsockopt(struct sock *sk, int level, int optname,
405 		   char __user *optval, int __user *optlen);
406 bool tcp_bpf_bypass_getsockopt(int level, int optname);
407 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
408 		      sockptr_t optval, unsigned int optlen);
409 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
410 		   unsigned int optlen);
411 void tcp_set_keepalive(struct sock *sk, int val);
412 void tcp_syn_ack_timeout(const struct request_sock *req);
413 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
414 		int flags, int *addr_len);
415 int tcp_set_rcvlowat(struct sock *sk, int val);
416 int tcp_set_window_clamp(struct sock *sk, int val);
417 void tcp_update_recv_tstamps(struct sk_buff *skb,
418 			     struct scm_timestamping_internal *tss);
419 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
420 			struct scm_timestamping_internal *tss);
421 void tcp_data_ready(struct sock *sk);
422 #ifdef CONFIG_MMU
423 int tcp_mmap(struct file *file, struct socket *sock,
424 	     struct vm_area_struct *vma);
425 #endif
426 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
427 		       struct tcp_options_received *opt_rx,
428 		       int estab, struct tcp_fastopen_cookie *foc);
429 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
430 
431 /*
432  *	BPF SKB-less helpers
433  */
434 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
435 			 struct tcphdr *th, u32 *cookie);
436 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
437 			 struct tcphdr *th, u32 *cookie);
438 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
439 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
440 			  const struct tcp_request_sock_ops *af_ops,
441 			  struct sock *sk, struct tcphdr *th);
442 /*
443  *	TCP v4 functions exported for the inet6 API
444  */
445 
446 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
447 void tcp_v4_mtu_reduced(struct sock *sk);
448 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
449 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
450 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
451 struct sock *tcp_create_openreq_child(const struct sock *sk,
452 				      struct request_sock *req,
453 				      struct sk_buff *skb);
454 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
455 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
456 				  struct request_sock *req,
457 				  struct dst_entry *dst,
458 				  struct request_sock *req_unhash,
459 				  bool *own_req);
460 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
461 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
462 int tcp_connect(struct sock *sk);
463 enum tcp_synack_type {
464 	TCP_SYNACK_NORMAL,
465 	TCP_SYNACK_FASTOPEN,
466 	TCP_SYNACK_COOKIE,
467 };
468 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
469 				struct request_sock *req,
470 				struct tcp_fastopen_cookie *foc,
471 				enum tcp_synack_type synack_type,
472 				struct sk_buff *syn_skb);
473 int tcp_disconnect(struct sock *sk, int flags);
474 
475 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
476 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
477 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
478 
479 /* From syncookies.c */
480 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
481 				 struct request_sock *req,
482 				 struct dst_entry *dst, u32 tsoff);
483 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
484 		      u32 cookie);
485 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
486 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
487 					    const struct tcp_request_sock_ops *af_ops,
488 					    struct sock *sk, struct sk_buff *skb);
489 #ifdef CONFIG_SYN_COOKIES
490 
491 /* Syncookies use a monotonic timer which increments every 60 seconds.
492  * This counter is used both as a hash input and partially encoded into
493  * the cookie value.  A cookie is only validated further if the delta
494  * between the current counter value and the encoded one is less than this,
495  * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
496  * the counter advances immediately after a cookie is generated).
497  */
498 #define MAX_SYNCOOKIE_AGE	2
499 #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
500 #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
501 
502 /* syncookies: remember time of last synqueue overflow
503  * But do not dirty this field too often (once per second is enough).
504  * It is racy as we do not hold a lock, but the race is very minor.
505  */
506 static inline void tcp_synq_overflow(const struct sock *sk)
507 {
508 	unsigned int last_overflow;
509 	unsigned int now = jiffies;
510 
511 	if (sk->sk_reuseport) {
512 		struct sock_reuseport *reuse;
513 
514 		reuse = rcu_dereference(sk->sk_reuseport_cb);
515 		if (likely(reuse)) {
516 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
517 			if (!time_between32(now, last_overflow,
518 					    last_overflow + HZ))
519 				WRITE_ONCE(reuse->synq_overflow_ts, now);
520 			return;
521 		}
522 	}
523 
524 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
525 	if (!time_between32(now, last_overflow, last_overflow + HZ))
526 		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
527 }
528 
529 /* syncookies: no recent synqueue overflow on this listening socket? */
530 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
531 {
532 	unsigned int last_overflow;
533 	unsigned int now = jiffies;
534 
535 	if (sk->sk_reuseport) {
536 		struct sock_reuseport *reuse;
537 
538 		reuse = rcu_dereference(sk->sk_reuseport_cb);
539 		if (likely(reuse)) {
540 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
541 			return !time_between32(now, last_overflow - HZ,
542 					       last_overflow +
543 					       TCP_SYNCOOKIE_VALID);
544 		}
545 	}
546 
547 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
548 
549 	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
550 	 * then we're under synflood. However, we have to use
551 	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
552 	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
553 	 * jiffies but before we store .ts_recent_stamp into last_overflow,
554 	 * which could lead to rejecting a valid syncookie.
555 	 */
556 	return !time_between32(now, last_overflow - HZ,
557 			       last_overflow + TCP_SYNCOOKIE_VALID);
558 }
559 
560 static inline u32 tcp_cookie_time(void)
561 {
562 	u64 val = get_jiffies_64();
563 
564 	do_div(val, TCP_SYNCOOKIE_PERIOD);
565 	return val;
566 }
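/* Illustrative sketch (not part of this header): how the counter above
 * could be used to age out a received cookie.  "encoded_count" stands for
 * the counter value recovered from the cookie and is a hypothetical name.
 */
#if 0
static inline bool example_cookie_expired(u32 encoded_count)
{
	return (u32)(tcp_cookie_time() - encoded_count) >= MAX_SYNCOOKIE_AGE;
}
#endif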
567 
568 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
569 			      u16 *mssp);
570 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
571 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
572 bool cookie_timestamp_decode(const struct net *net,
573 			     struct tcp_options_received *opt);
574 bool cookie_ecn_ok(const struct tcp_options_received *opt,
575 		   const struct net *net, const struct dst_entry *dst);
576 
577 /* From net/ipv6/syncookies.c */
578 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
579 		      u32 cookie);
580 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
581 
582 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
583 			      const struct tcphdr *th, u16 *mssp);
584 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
585 #endif
586 /* tcp_output.c */
587 
588 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
589 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
590 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
591 			       int nonagle);
592 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
593 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
594 void tcp_retransmit_timer(struct sock *sk);
595 void tcp_xmit_retransmit_queue(struct sock *);
596 void tcp_simple_retransmit(struct sock *);
597 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
598 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
599 enum tcp_queue {
600 	TCP_FRAG_IN_WRITE_QUEUE,
601 	TCP_FRAG_IN_RTX_QUEUE,
602 };
603 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
604 		 struct sk_buff *skb, u32 len,
605 		 unsigned int mss_now, gfp_t gfp);
606 
607 void tcp_send_probe0(struct sock *);
608 int tcp_write_wakeup(struct sock *, int mib);
609 void tcp_send_fin(struct sock *sk);
610 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
611 int tcp_send_synack(struct sock *);
612 void tcp_push_one(struct sock *, unsigned int mss_now);
613 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
614 void tcp_send_ack(struct sock *sk);
615 void tcp_send_delayed_ack(struct sock *sk);
616 void tcp_send_loss_probe(struct sock *sk);
617 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
618 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
619 			     const struct sk_buff *next_skb);
620 
621 /* tcp_input.c */
622 void tcp_rearm_rto(struct sock *sk);
623 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
624 void tcp_reset(struct sock *sk, struct sk_buff *skb);
625 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
626 void tcp_fin(struct sock *sk);
627 void tcp_check_space(struct sock *sk);
628 void tcp_sack_compress_send_ack(struct sock *sk);
629 
630 /* tcp_timer.c */
631 void tcp_init_xmit_timers(struct sock *);
632 static inline void tcp_clear_xmit_timers(struct sock *sk)
633 {
634 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
635 		__sock_put(sk);
636 
637 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
638 		__sock_put(sk);
639 
640 	inet_csk_clear_xmit_timers(sk);
641 }
642 
643 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
644 unsigned int tcp_current_mss(struct sock *sk);
645 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
646 
647 /* Bound MSS / TSO packet size with the half of the window */
648 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
649 {
650 	int cutoff;
651 
652 	/* When peer uses tiny windows, there is no use in packetizing
653 	 * to sub-MSS pieces for the sake of SWS or making sure there
654 	 * are enough packets in the pipe for fast recovery.
655 	 *
656 	 * On the other hand, for extremely large MSS devices, handling
657 	 * smaller than MSS windows in this way does make sense.
658 	 */
659 	if (tp->max_window > TCP_MSS_DEFAULT)
660 		cutoff = (tp->max_window >> 1);
661 	else
662 		cutoff = tp->max_window;
663 
664 	if (cutoff && pktsize > cutoff)
665 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
666 	else
667 		return pktsize;
668 }
669 
670 /* tcp.c */
671 void tcp_get_info(struct sock *, struct tcp_info *);
672 
673 /* Read 'sendfile()'-style from a TCP socket */
674 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
675 		  sk_read_actor_t recv_actor);
676 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
677 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
678 void tcp_read_done(struct sock *sk, size_t len);
679 
680 void tcp_initialize_rcv_mss(struct sock *sk);
681 
682 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
683 int tcp_mss_to_mtu(struct sock *sk, int mss);
684 void tcp_mtup_init(struct sock *sk);
685 
686 static inline void tcp_bound_rto(const struct sock *sk)
687 {
688 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
689 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
690 }
691 
692 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
693 {
694 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
695 }
696 
697 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
698 {
699 	/* mptcp hooks are only on the slow path */
700 	if (sk_is_mptcp((struct sock *)tp))
701 		return;
702 
703 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
704 			       ntohl(TCP_FLAG_ACK) |
705 			       snd_wnd);
706 }
707 
708 static inline void tcp_fast_path_on(struct tcp_sock *tp)
709 {
710 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
711 }
712 
713 static inline void tcp_fast_path_check(struct sock *sk)
714 {
715 	struct tcp_sock *tp = tcp_sk(sk);
716 
717 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
718 	    tp->rcv_wnd &&
719 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
720 	    !tp->urg_data)
721 		tcp_fast_path_on(tp);
722 }
723 
724 /* Compute the actual rto_min value */
725 static inline u32 tcp_rto_min(struct sock *sk)
726 {
727 	const struct dst_entry *dst = __sk_dst_get(sk);
728 	u32 rto_min = inet_csk(sk)->icsk_rto_min;
729 
730 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
731 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
732 	return rto_min;
733 }
734 
735 static inline u32 tcp_rto_min_us(struct sock *sk)
736 {
737 	return jiffies_to_usecs(tcp_rto_min(sk));
738 }
739 
740 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
741 {
742 	return dst_metric_locked(dst, RTAX_CC_ALGO);
743 }
744 
745 /* Minimum RTT in usec. ~0 means not available. */
746 static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
747 {
748 	return minmax_get(&tp->rtt_min);
749 }
750 
751 /* Compute the actual receive window we are currently advertising.
752  * Rcv_nxt can be after the window if our peer pushes more data
753  * than the offered window.
754  */
755 static inline u32 tcp_receive_window(const struct tcp_sock *tp)
756 {
757 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
758 
759 	if (win < 0)
760 		win = 0;
761 	return (u32) win;
762 }
763 
764 /* Choose a new window, without checks for shrinking, and without
765  * scaling applied to the result.  The caller does these things
766  * if necessary.  This is a "raw" window selection.
767  */
768 u32 __tcp_select_window(struct sock *sk);
769 
770 void tcp_send_window_probe(struct sock *sk);
771 
772 /* TCP uses 32bit jiffies to save some space.
773  * Note that this is different from tcp_time_stamp, which
774  * historically was the same value until linux-4.13.
775  */
776 #define tcp_jiffies32 ((u32)jiffies)
777 
778 /*
779  * Deliver a 32bit value for TCP timestamp option (RFC 7323)
780  * It is no longer tied to jiffies, but to 1 ms clock.
781  * Note: double check if you want to use tcp_jiffies32 instead of this.
782  */
783 #define TCP_TS_HZ	1000
784 
785 static inline u64 tcp_clock_ns(void)
786 {
787 	return ktime_get_ns();
788 }
789 
790 static inline u64 tcp_clock_us(void)
791 {
792 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
793 }
794 
795 /* This should only be used in contexts where tp->tcp_mstamp is up to date */
796 static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
797 {
798 	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
799 }
800 
801 /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
802 static inline u32 tcp_ns_to_ts(u64 ns)
803 {
804 	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
805 }
806 
807 /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
808 static inline u32 tcp_time_stamp_raw(void)
809 {
810 	return tcp_ns_to_ts(tcp_clock_ns());
811 }
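/* Example: with TCP_TS_HZ == 1000 one TSval tick is 1 ms, so
 * tcp_ns_to_ts(2500000) == 2, and two TSval values that differ by 5
 * represent 5 ms of elapsed time regardless of the kernel HZ setting.
 */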
812 
813 void tcp_mstamp_refresh(struct tcp_sock *tp);
814 
815 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
816 {
817 	return max_t(s64, t1 - t0, 0);
818 }
819 
820 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
821 {
822 	return tcp_ns_to_ts(skb->skb_mstamp_ns);
823 }
824 
825 /* provide the departure time in us unit */
826 static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
827 {
828 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
829 }
830 
831 
832 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
833 
834 #define TCPHDR_FIN 0x01
835 #define TCPHDR_SYN 0x02
836 #define TCPHDR_RST 0x04
837 #define TCPHDR_PSH 0x08
838 #define TCPHDR_ACK 0x10
839 #define TCPHDR_URG 0x20
840 #define TCPHDR_ECE 0x40
841 #define TCPHDR_CWR 0x80
842 
843 #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
844 
845 /* This is what the send packet queuing engine uses to pass
846  * TCP per-packet control information to the transmission code.
847  * We also store the host-order sequence numbers here.
848  * This is 44 bytes if IPV6 is enabled.
849  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
850  */
851 struct tcp_skb_cb {
852 	__u32		seq;		/* Starting sequence number	*/
853 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
854 	union {
855 		/* Note : tcp_tw_isn is used in input path only
856 		 *	  (isn chosen by tcp_timewait_state_process())
857 		 *
858 		 * 	  tcp_gso_segs/size are used in write queue only,
859 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
860 		 */
861 		__u32		tcp_tw_isn;
862 		struct {
863 			u16	tcp_gso_segs;
864 			u16	tcp_gso_size;
865 		};
866 	};
867 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
868 
869 	__u8		sacked;		/* State flags for SACK.	*/
870 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
871 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
872 #define TCPCB_LOST		0x04	/* SKB is lost			*/
873 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
874 #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
875 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
876 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
877 				TCPCB_REPAIRED)
878 
879 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
880 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
881 			eor:1,		/* Is skb MSG_EOR marked? */
882 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
883 			unused:5;
884 	__u32		ack_seq;	/* Sequence number ACK'd	*/
885 	union {
886 		struct {
887 #define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
888 			/* There is space for up to 24 bytes */
889 			__u32 is_app_limited:1, /* cwnd not fully used? */
890 			      delivered_ce:20,
891 			      unused:11;
892 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
893 			__u32 delivered;
894 			/* start of send pipeline phase */
895 			u64 first_tx_mstamp;
896 			/* when we reached the "delivered" count */
897 			u64 delivered_mstamp;
898 		} tx;   /* only used for outgoing skbs */
899 		union {
900 			struct inet_skb_parm	h4;
901 #if IS_ENABLED(CONFIG_IPV6)
902 			struct inet6_skb_parm	h6;
903 #endif
904 		} header;	/* For incoming skbs */
905 	};
906 };
907 
908 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
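/* Illustrative sketch (not part of this header): reading the per-packet
 * control block defined above.  The helper name is hypothetical.
 */
#if 0
static inline u32 example_skb_seq_len(const struct sk_buff *skb)
{
	/* end_seq counts SYN and FIN, so this may exceed the payload length */
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}
#endif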
909 
910 extern const struct inet_connection_sock_af_ops ipv4_specific;
911 
912 #if IS_ENABLED(CONFIG_IPV6)
913 /* This is the variant of inet6_iif() that must be used by TCP,
914  * as TCP moves IP6CB into a different location in skb->cb[]
915  */
916 static inline int tcp_v6_iif(const struct sk_buff *skb)
917 {
918 	return TCP_SKB_CB(skb)->header.h6.iif;
919 }
920 
921 static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
922 {
923 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
924 
925 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
926 }
927 
928 /* TCP_SKB_CB reference means this can not be used from early demux */
929 static inline int tcp_v6_sdif(const struct sk_buff *skb)
930 {
931 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
932 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
933 		return TCP_SKB_CB(skb)->header.h6.iif;
934 #endif
935 	return 0;
936 }
937 
938 extern const struct inet_connection_sock_af_ops ipv6_specific;
939 
940 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
941 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
942 void tcp_v6_early_demux(struct sk_buff *skb);
943 
944 #endif
945 
946 /* TCP_SKB_CB reference means this can not be used from early demux */
947 static inline int tcp_v4_sdif(struct sk_buff *skb)
948 {
949 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
950 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
951 		return TCP_SKB_CB(skb)->header.h4.iif;
952 #endif
953 	return 0;
954 }
955 
956 /* Due to TSO, an SKB can be composed of multiple actual
957  * packets.  To keep these tracked properly, we use this.
958  */
959 static inline int tcp_skb_pcount(const struct sk_buff *skb)
960 {
961 	return TCP_SKB_CB(skb)->tcp_gso_segs;
962 }
963 
964 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
965 {
966 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
967 }
968 
969 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
970 {
971 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
972 }
973 
974 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
975 static inline int tcp_skb_mss(const struct sk_buff *skb)
976 {
977 	return TCP_SKB_CB(skb)->tcp_gso_size;
978 }
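/* Example: a TSO skb carrying 30000 bytes of payload with
 * tcp_gso_size == 1500 has tcp_skb_pcount() == 20, so loss marking and
 * cwnd accounting treat it as 20 separate packets.
 */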
979 
980 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
981 {
982 	return likely(!TCP_SKB_CB(skb)->eor);
983 }
984 
985 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
986 					const struct sk_buff *from)
987 {
988 	return likely(tcp_skb_can_collapse_to(to) &&
989 		      mptcp_skb_can_collapse(to, from) &&
990 		      skb_pure_zcopy_same(to, from));
991 }
992 
993 /* Events passed to congestion control interface */
994 enum tcp_ca_event {
995 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
996 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
997 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
998 	CA_EVENT_LOSS,		/* loss timeout */
999 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1000 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
1001 };
1002 
1003 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1004 enum tcp_ca_ack_event_flags {
1005 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1006 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1007 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1008 };
1009 
1010 /*
1011  * Interface for adding new TCP congestion control handlers
1012  */
1013 #define TCP_CA_NAME_MAX	16
1014 #define TCP_CA_MAX	128
1015 #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1016 
1017 #define TCP_CA_UNSPEC	0
1018 
1019 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1020 #define TCP_CONG_NON_RESTRICTED 0x1
1021 /* Requires ECN/ECT set on all packets */
1022 #define TCP_CONG_NEEDS_ECN	0x2
1023 #define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1024 
1025 union tcp_cc_info;
1026 
1027 struct ack_sample {
1028 	u32 pkts_acked;
1029 	s32 rtt_us;
1030 	u32 in_flight;
1031 };
1032 
1033 /* A rate sample measures the number of (original/retransmitted) data
1034  * packets delivered "delivered" over an interval of time "interval_us".
1035  * The tcp_rate.c code fills in the rate sample, and congestion
1036  * control modules that define a cong_control function to run at the end
1037  * of ACK processing can optionally choose to consult this sample when
1038  * setting cwnd and pacing rate.
1039  * A sample is invalid if "delivered" or "interval_us" is negative.
1040  */
1041 struct rate_sample {
1042 	u64  prior_mstamp; /* starting timestamp for interval */
1043 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1044 	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1045 	s32  delivered;		/* number of packets delivered over interval */
1046 	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
1047 	long interval_us;	/* time for tp->delivered to incr "delivered" */
1048 	u32 snd_interval_us;	/* snd interval for delivered packets */
1049 	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1050 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1051 	int  losses;		/* number of packets marked lost upon ACK */
1052 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1053 	u32  prior_in_flight;	/* in flight before this ACK */
1054 	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
1055 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1056 	bool is_retrans;	/* is sample from retransmission? */
1057 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1058 };
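/* Illustrative sketch (not part of this header): deriving a crude
 * delivery-rate estimate from a rate_sample inside a cong_control hook.
 * Hypothetical helper; real consumers such as BBR apply much more
 * filtering before acting on a single sample.
 */
#if 0
static u64 example_delivery_rate_bps(const struct tcp_sock *tp,
				     const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample, see the comment above */

	return div64_u64((u64)rs->delivered * tp->mss_cache * BITS_PER_BYTE *
			 USEC_PER_SEC, rs->interval_us);
}
#endif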
1059 
1060 struct tcp_congestion_ops {
1061 /* fast path fields are put first to fill one cache line */
1062 
1063 	/* return slow start threshold (required) */
1064 	u32 (*ssthresh)(struct sock *sk);
1065 
1066 	/* do new cwnd calculation (required) */
1067 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1068 
1069 	/* call before changing ca_state (optional) */
1070 	void (*set_state)(struct sock *sk, u8 new_state);
1071 
1072 	/* call when cwnd event occurs (optional) */
1073 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1074 
1075 	/* call when ack arrives (optional) */
1076 	void (*in_ack_event)(struct sock *sk, u32 flags);
1077 
1078 	/* hook for packet ack accounting (optional) */
1079 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1080 
1081 	/* override sysctl_tcp_min_tso_segs */
1082 	u32 (*min_tso_segs)(struct sock *sk);
1083 
1084 	/* call when packets are delivered to update cwnd and pacing rate,
1085 	 * after all the ca_state processing. (optional)
1086 	 */
1087 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1088 
1089 
1090 	/* new value of cwnd after loss (required) */
1091 	u32  (*undo_cwnd)(struct sock *sk);
1092 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1093 	u32 (*sndbuf_expand)(struct sock *sk);
1094 
1095 /* control/slow paths put last */
1096 	/* get info for inet_diag (optional) */
1097 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1098 			   union tcp_cc_info *info);
1099 
1100 	char 			name[TCP_CA_NAME_MAX];
1101 	struct module		*owner;
1102 	struct list_head	list;
1103 	u32			key;
1104 	u32			flags;
1105 
1106 	/* initialize private data (optional) */
1107 	void (*init)(struct sock *sk);
1108 	/* cleanup private data  (optional) */
1109 	void (*release)(struct sock *sk);
1110 } ____cacheline_aligned_in_smp;
1111 
1112 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1113 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1114 int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1115 				  struct tcp_congestion_ops *old_type);
1116 int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1117 
1118 void tcp_assign_congestion_control(struct sock *sk);
1119 void tcp_init_congestion_control(struct sock *sk);
1120 void tcp_cleanup_congestion_control(struct sock *sk);
1121 int tcp_set_default_congestion_control(struct net *net, const char *name);
1122 void tcp_get_default_congestion_control(struct net *net, char *name);
1123 void tcp_get_available_congestion_control(char *buf, size_t len);
1124 void tcp_get_allowed_congestion_control(char *buf, size_t len);
1125 int tcp_set_allowed_congestion_control(char *allowed);
1126 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1127 			       bool cap_net_admin);
1128 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1129 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1130 
1131 u32 tcp_reno_ssthresh(struct sock *sk);
1132 u32 tcp_reno_undo_cwnd(struct sock *sk);
1133 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1134 extern struct tcp_congestion_ops tcp_reno;
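/* Illustrative sketch (not part of this header): the smallest useful
 * tcp_congestion_ops instance, reusing the Reno helpers declared above.
 * The name "example_cc" is hypothetical; a real module would pass this to
 * tcp_register_congestion_control() from its init path.
 */
#if 0
static struct tcp_congestion_ops example_cc __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + AIMD */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* revert after spurious loss */
	.name		= "example_cc",
	.owner		= THIS_MODULE,
};
#endif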
1135 
1136 struct tcp_congestion_ops *tcp_ca_find(const char *name);
1137 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1138 u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1139 #ifdef CONFIG_INET
1140 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1141 #else
1142 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1143 {
1144 	return NULL;
1145 }
1146 #endif
1147 
1148 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1149 {
1150 	const struct inet_connection_sock *icsk = inet_csk(sk);
1151 
1152 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1153 }
1154 
1155 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1156 {
1157 	const struct inet_connection_sock *icsk = inet_csk(sk);
1158 
1159 	if (icsk->icsk_ca_ops->cwnd_event)
1160 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1161 }
1162 
1163 /* From tcp_cong.c */
1164 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1165 
1166 /* From tcp_rate.c */
1167 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1168 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1169 			    struct rate_sample *rs);
1170 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1171 		  bool is_sack_reneg, struct rate_sample *rs);
1172 void tcp_rate_check_app_limited(struct sock *sk);
1173 
1174 static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1175 {
1176 	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1177 }
1178 
1179 /* These functions determine how the current flow behaves with respect to SACK
1180  * handling. SACK is negotiated with the peer, and therefore it can vary
1181  * between different flows.
1182  *
1183  * tcp_is_sack - SACK enabled
1184  * tcp_is_reno - No SACK
1185  */
1186 static inline int tcp_is_sack(const struct tcp_sock *tp)
1187 {
1188 	return likely(tp->rx_opt.sack_ok);
1189 }
1190 
1191 static inline bool tcp_is_reno(const struct tcp_sock *tp)
1192 {
1193 	return !tcp_is_sack(tp);
1194 }
1195 
1196 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1197 {
1198 	return tp->sacked_out + tp->lost_out;
1199 }
1200 
1201 /* This determines how many packets are "in the network" to the best
1202  * of our knowledge.  In many cases it is conservative, but where
1203  * detailed information is available from the receiver (via SACK
1204  * blocks etc.) we can make more aggressive calculations.
1205  *
1206  * Use this for decisions involving congestion control, use just
1207  * tp->packets_out to determine if the send queue is empty or not.
1208  *
1209  * Read this equation as:
1210  *
1211  *	"Packets sent once on transmission queue" MINUS
1212  *	"Packets left network, but not honestly ACKed yet" PLUS
1213  *	"Packets fast retransmitted"
1214  */
1215 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1216 {
1217 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1218 }
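/* Example: with packets_out == 100, sacked_out == 10, lost_out == 5 and
 * retrans_out == 3, tcp_packets_in_flight() == 100 - (10 + 5) + 3 == 88.
 */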
1219 
1220 #define TCP_INFINITE_SSTHRESH	0x7fffffff
1221 
1222 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1223 {
1224 	return tp->snd_cwnd;
1225 }
1226 
1227 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1228 {
1229 	WARN_ON_ONCE((int)val <= 0);
1230 	tp->snd_cwnd = val;
1231 }
1232 
1233 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1234 {
1235 	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1236 }
1237 
1238 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1239 {
1240 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1241 }
1242 
1243 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1244 {
1245 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1246 	       (1 << inet_csk(sk)->icsk_ca_state);
1247 }
1248 
1249 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1250  * The exception is cwnd reduction phase, when cwnd is decreasing towards
1251  * ssthresh.
1252  */
1253 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1254 {
1255 	const struct tcp_sock *tp = tcp_sk(sk);
1256 
1257 	if (tcp_in_cwnd_reduction(sk))
1258 		return tp->snd_ssthresh;
1259 	else
1260 		return max(tp->snd_ssthresh,
1261 			   ((tcp_snd_cwnd(tp) >> 1) +
1262 			    (tcp_snd_cwnd(tp) >> 2)));
1263 }
1264 
1265 /* Use define here intentionally to get WARN_ON location shown at the caller */
1266 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1267 
1268 void tcp_enter_cwr(struct sock *sk);
1269 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1270 
1271 /* The maximum number of MSS of available cwnd for which TSO defers
1272  * sending if not using sysctl_tcp_tso_win_divisor.
1273  */
1274 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1275 {
1276 	return 3;
1277 }
1278 
1279 /* Returns end sequence number of the receiver's advertised window */
1280 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1281 {
1282 	return tp->snd_una + tp->snd_wnd;
1283 }
1284 
1285 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1286  * flexible approach. The RFC suggests cwnd should not be raised unless
1287  * it was fully used previously. And that's exactly what we do in
1288  * congestion avoidance mode. But in slow start we allow cwnd to grow
1289  * as long as the application has used half the cwnd.
1290  * Example :
1291  *    cwnd is 10 (IW10), but application sends 9 frames.
1292  *    We allow cwnd to reach 18 when all frames are ACKed.
1293  * This check is safe because it's as aggressive as slow start which already
1294  * risks 100% overshoot. The advantage is that we discourage applications from
1295  * sending filler packets or extra data just to artificially blow up the cwnd
1296  * usage, and allow an application-limited process to probe bw more aggressively.
1297  */
1298 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1299 {
1300 	const struct tcp_sock *tp = tcp_sk(sk);
1301 
1302 	if (tp->is_cwnd_limited)
1303 		return true;
1304 
1305 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1306 	if (tcp_in_slow_start(tp))
1307 		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1308 
1309 	return false;
1310 }
1311 
1312 /* BBR congestion control needs pacing.
1313  * Same remark for SO_MAX_PACING_RATE.
1314  * The sch_fq packet scheduler handles pacing efficiently,
1315  * but is not always installed/used.
1316  * Return true if TCP stack should pace packets itself.
1317  */
1318 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1319 {
1320 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1321 }
1322 
1323 /* Estimate in how many jiffies the next packet for this flow can be sent.
1324  * Scheduling a retransmit timer too early would be silly.
1325  */
1326 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1327 {
1328 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1329 
1330 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1331 }
1332 
1333 static inline void tcp_reset_xmit_timer(struct sock *sk,
1334 					const int what,
1335 					unsigned long when,
1336 					const unsigned long max_when)
1337 {
1338 	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1339 				  max_when);
1340 }
1341 
1342 /* Something is really bad: we could not queue an additional packet,
1343  * because qdisc is full or receiver sent a 0 window, or we are paced.
1344  * We do not want to add fuel to the fire, or abort too early,
1345  * so make sure the timer we arm now is at least 200ms in the future,
1346  * regardless of current icsk_rto value (as it could be ~2ms)
1347  */
1348 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1349 {
1350 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1351 }
1352 
1353 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1354 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1355 					    unsigned long max_when)
1356 {
1357 	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1358 			   inet_csk(sk)->icsk_backoff);
1359 	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1360 
1361 	return (unsigned long)min_t(u64, when, max_when);
1362 }
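/* Example: with a 200 ms probe0 base and icsk_backoff == 3 the next
 * zero-window probe is scheduled 200 ms << 3 == 1.6 s out (clamped to
 * max_when); the shift count itself is clamped to
 * ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1 so an absurd icsk_backoff cannot
 * overflow the computation.
 */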
1363 
1364 static inline void tcp_check_probe_timer(struct sock *sk)
1365 {
1366 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1367 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1368 				     tcp_probe0_base(sk), TCP_RTO_MAX);
1369 }
1370 
1371 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1372 {
1373 	tp->snd_wl1 = seq;
1374 }
1375 
1376 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1377 {
1378 	tp->snd_wl1 = seq;
1379 }
1380 
1381 /*
1382  * Calculate(/check) TCP checksum
1383  */
1384 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1385 				   __be32 daddr, __wsum base)
1386 {
1387 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1388 }
1389 
1390 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1391 {
1392 	return !skb_csum_unnecessary(skb) &&
1393 		__skb_checksum_complete(skb);
1394 }
1395 
1396 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1397 		     enum skb_drop_reason *reason);
1398 
1399 
1400 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1401 void tcp_set_state(struct sock *sk, int state);
1402 void tcp_done(struct sock *sk);
1403 int tcp_abort(struct sock *sk, int err);
1404 
1405 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1406 {
1407 	rx_opt->dsack = 0;
1408 	rx_opt->num_sacks = 0;
1409 }
1410 
1411 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1412 
1413 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1414 {
1415 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1416 	struct tcp_sock *tp = tcp_sk(sk);
1417 	s32 delta;
1418 
1419 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1420 	    tp->packets_out || ca_ops->cong_control)
1421 		return;
1422 	delta = tcp_jiffies32 - tp->lsndtime;
1423 	if (delta > inet_csk(sk)->icsk_rto)
1424 		tcp_cwnd_restart(sk, delta);
1425 }
1426 
1427 /* Determine a window scaling and initial window to offer. */
1428 void tcp_select_initial_window(const struct sock *sk, int __space,
1429 			       __u32 mss, __u32 *rcv_wnd,
1430 			       __u32 *window_clamp, int wscale_ok,
1431 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1432 
1433 static inline int tcp_win_from_space(const struct sock *sk, int space)
1434 {
1435 	s64 scaled_space = (s64)space * tcp_sk(sk)->scaling_ratio;
1436 
1437 	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1438 }
1439 
1440 /* inverse of tcp_win_from_space() */
1441 static inline int tcp_space_from_win(const struct sock *sk, int win)
1442 {
1443 	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1444 
1445 	do_div(val, tcp_sk(sk)->scaling_ratio);
1446 	return val;
1447 }
1448 
1449 static inline void tcp_scaling_ratio_init(struct sock *sk)
1450 {
1451 	/* Assume a conservative default of 1200 bytes of payload per 4K page.
1452 	 * This may be adjusted later in tcp_measure_rcv_mss().
1453 	 */
1454 	tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) /
1455 				    SKB_TRUESIZE(4096);
1456 }
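/* Example: with the conservative default above (1200 payload bytes
 * assumed per SKB_TRUESIZE(4096) bytes of receive memory),
 * tcp_win_from_space(sk, space) works out to roughly a quarter of
 * "space" until tcp_measure_rcv_mss() refines the ratio from real skbs.
 */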
1457 
1458 /* Note: caller must be prepared to deal with negative returns */
1459 static inline int tcp_space(const struct sock *sk)
1460 {
1461 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1462 				  READ_ONCE(sk->sk_backlog.len) -
1463 				  atomic_read(&sk->sk_rmem_alloc));
1464 }
1465 
1466 static inline int tcp_full_space(const struct sock *sk)
1467 {
1468 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1469 }
1470 
1471 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1472 {
1473 	int unused_mem = sk_unused_reserved_mem(sk);
1474 	struct tcp_sock *tp = tcp_sk(sk);
1475 
1476 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
1477 	if (unused_mem)
1478 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1479 					 tcp_win_from_space(sk, unused_mem));
1480 }
1481 
1482 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1483 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1484 
1485 
1486 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1487  * If 87.5% (7/8) of the space has been consumed, we want to override the
1488  * SO_RCVLOWAT constraint, since we are receiving skbs with too small a
1489  * len/truesize ratio.
1490  */
1491 static inline bool tcp_rmem_pressure(const struct sock *sk)
1492 {
1493 	int rcvbuf, threshold;
1494 
1495 	if (tcp_under_memory_pressure(sk))
1496 		return true;
1497 
1498 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1499 	threshold = rcvbuf - (rcvbuf >> 3);
1500 
1501 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1502 }
1503 
1504 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1505 {
1506 	const struct tcp_sock *tp = tcp_sk(sk);
1507 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1508 
1509 	if (avail <= 0)
1510 		return false;
1511 
1512 	return (avail >= target) || tcp_rmem_pressure(sk) ||
1513 	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1514 }
1515 
1516 extern void tcp_openreq_init_rwin(struct request_sock *req,
1517 				  const struct sock *sk_listener,
1518 				  const struct dst_entry *dst);
1519 
1520 void tcp_enter_memory_pressure(struct sock *sk);
1521 void tcp_leave_memory_pressure(struct sock *sk);
1522 
1523 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1524 {
1525 	struct net *net = sock_net((struct sock *)tp);
1526 	int val;
1527 
1528 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1529 	 * and do_tcp_setsockopt().
1530 	 */
1531 	val = READ_ONCE(tp->keepalive_intvl);
1532 
1533 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1534 }
1535 
1536 static inline int keepalive_time_when(const struct tcp_sock *tp)
1537 {
1538 	struct net *net = sock_net((struct sock *)tp);
1539 	int val;
1540 
1541 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1542 	val = READ_ONCE(tp->keepalive_time);
1543 
1544 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1545 }
1546 
1547 static inline int keepalive_probes(const struct tcp_sock *tp)
1548 {
1549 	struct net *net = sock_net((struct sock *)tp);
1550 	int val;
1551 
1552 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1553 	 * and do_tcp_setsockopt().
1554 	 */
1555 	val = READ_ONCE(tp->keepalive_probes);
1556 
1557 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1558 }
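
/* Illustrative sketch, not part of the original header: each helper
 * above falls back to the per-netns sysctl when the per-socket value is
 * unset.  The hypothetical helper below combines them into a rough
 * upper bound (in jiffies) on how long an idle connection is kept
 * around before keepalive gives up on it.
 */
static inline u32 tcp_example_keepalive_budget(const struct tcp_sock *tp)
{
	return keepalive_time_when(tp) +
	       keepalive_probes(tp) * keepalive_intvl_when(tp);
}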
1559 
1560 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1561 {
1562 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1563 
1564 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1565 			  tcp_jiffies32 - tp->rcv_tstamp);
1566 }
1567 
1568 static inline int tcp_fin_time(const struct sock *sk)
1569 {
1570 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1571 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1572 	const int rto = inet_csk(sk)->icsk_rto;
1573 
1574 	if (fin_timeout < (rto << 2) - (rto >> 1))
1575 		fin_timeout = (rto << 2) - (rto >> 1);
1576 
1577 	return fin_timeout;
1578 }
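
/* Illustrative note, not in the original header: the clamp in
 * tcp_fin_time() is (rto << 2) - (rto >> 1) = 4*RTO - RTO/2 = 3.5*RTO,
 * so the FIN_WAIT_2 timeout is never shorter than three and a half
 * retransmission timeouts, whatever linger2 or the sysctl say.
 */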
1579 
1580 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1581 				  int paws_win)
1582 {
1583 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1584 		return true;
1585 	if (unlikely(!time_before32(ktime_get_seconds(),
1586 				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1587 		return true;
1588 	/*
1589 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1590 	 * while subsequent TCP messages carry valid values. Ignore a zero
1591 	 * value, or else a 'negative' tsval might make us reject their packets.
1592 	 */
1593 	if (!rx_opt->ts_recent)
1594 		return true;
1595 	return false;
1596 }
1597 
1598 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1599 				   int rst)
1600 {
1601 	if (tcp_paws_check(rx_opt, 0))
1602 		return false;
1603 
1604 	/* RST segments are not recommended to carry a timestamp,
1605 	   and, if they do, it is recommended to ignore PAWS because
1606 	   "their cleanup function should take precedence over timestamps."
1607 	   Certainly, this is a mistake. It is necessary to understand the
1608 	   reason for this constraint before relaxing it: if the peer reboots,
1609 	   its clock may go out of sync and half-open connections will not be reset.
1610 	   Actually, the problem would not exist if all implementations
1611 	   followed the draft about maintaining clock state across reboots.
1612 	   Linux-2.2 DOES NOT!
1613 
1614 	   However, we can relax the time bounds for RST segments to MSL.
1615 	 */
1616 	if (rst && !time_before32(ktime_get_seconds(),
1617 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1618 		return false;
1619 	return true;
1620 }
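
/* Illustrative sketch, not part of the original header: a typical
 * caller pattern, loosely modelled on the validation done in
 * tcp_validate_incoming().  The wrapper and its 'rst' flag are
 * hypothetical; PAWS is only meaningful once a timestamp option has
 * actually been negotiated and seen on the connection.
 */
static inline bool tcp_example_paws_drop(const struct tcp_options_received *rx_opt,
					 int rst)
{
	return rx_opt->saw_tstamp && rx_opt->tstamp_ok &&
	       tcp_paws_reject(rx_opt, rst);
}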
1621 
1622 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1623 			  int mib_idx, u32 *last_oow_ack_time);
1624 
1625 static inline void tcp_mib_init(struct net *net)
1626 {
1627 	/* See RFC 2012 */
1628 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1629 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1630 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1631 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1632 }
1633 
1634 /* from STCP */
1635 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1636 {
1637 	tp->lost_skb_hint = NULL;
1638 }
1639 
1640 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1641 {
1642 	tcp_clear_retrans_hints_partial(tp);
1643 	tp->retransmit_skb_hint = NULL;
1644 }
1645 
1646 union tcp_md5_addr {
1647 	struct in_addr  a4;
1648 #if IS_ENABLED(CONFIG_IPV6)
1649 	struct in6_addr	a6;
1650 #endif
1651 };
1652 
1653 /* - key database */
1654 struct tcp_md5sig_key {
1655 	struct hlist_node	node;
1656 	u8			keylen;
1657 	u8			family; /* AF_INET or AF_INET6 */
1658 	u8			prefixlen;
1659 	u8			flags;
1660 	union tcp_md5_addr	addr;
1661 	int			l3index; /* set if key added with L3 scope */
1662 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1663 	struct rcu_head		rcu;
1664 };
1665 
1666 /* - sock block */
1667 struct tcp_md5sig_info {
1668 	struct hlist_head	head;
1669 	struct rcu_head		rcu;
1670 };
1671 
1672 /* - pseudo header */
1673 struct tcp4_pseudohdr {
1674 	__be32		saddr;
1675 	__be32		daddr;
1676 	__u8		pad;
1677 	__u8		protocol;
1678 	__be16		len;
1679 };
1680 
1681 struct tcp6_pseudohdr {
1682 	struct in6_addr	saddr;
1683 	struct in6_addr daddr;
1684 	__be32		len;
1685 	__be32		protocol;	/* including padding */
1686 };
1687 
1688 union tcp_md5sum_block {
1689 	struct tcp4_pseudohdr ip4;
1690 #if IS_ENABLED(CONFIG_IPV6)
1691 	struct tcp6_pseudohdr ip6;
1692 #endif
1693 };
1694 
1695 /* - pool: digest algorithm, hash description and scratch buffer */
1696 struct tcp_md5sig_pool {
1697 	struct ahash_request	*md5_req;
1698 	void			*scratch;
1699 };
1700 
1701 /* - functions */
1702 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1703 			const struct sock *sk, const struct sk_buff *skb);
1704 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1705 		   int family, u8 prefixlen, int l3index, u8 flags,
1706 		   const u8 *newkey, u8 newkeylen);
1707 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1708 		     int family, u8 prefixlen, int l3index,
1709 		     struct tcp_md5sig_key *key);
1710 
1711 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1712 		   int family, u8 prefixlen, int l3index, u8 flags);
1713 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1714 					 const struct sock *addr_sk);
1715 
1716 #ifdef CONFIG_TCP_MD5SIG
1717 #include <linux/jump_label.h>
1718 extern struct static_key_false_deferred tcp_md5_needed;
1719 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1720 					   const union tcp_md5_addr *addr,
1721 					   int family);
1722 static inline struct tcp_md5sig_key *
1723 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1724 		  const union tcp_md5_addr *addr, int family)
1725 {
1726 	if (!static_branch_unlikely(&tcp_md5_needed.key))
1727 		return NULL;
1728 	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1729 }
1730 
1731 enum skb_drop_reason
1732 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1733 		     const void *saddr, const void *daddr,
1734 		     int family, int dif, int sdif);
1735 
1736 
1737 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1738 #else
1739 static inline struct tcp_md5sig_key *
1740 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1741 		  const union tcp_md5_addr *addr, int family)
1742 {
1743 	return NULL;
1744 }
1745 
1746 static inline enum skb_drop_reason
1747 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1748 		     const void *saddr, const void *daddr,
1749 		     int family, int dif, int sdif)
1750 {
1751 	return SKB_NOT_DROPPED_YET;
1752 }
1753 #define tcp_twsk_md5_key(twsk)	NULL
1754 #endif
1755 
1756 bool tcp_alloc_md5sig_pool(void);
1757 
1758 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1759 static inline void tcp_put_md5sig_pool(void)
1760 {
1761 	local_bh_enable();
1762 }
1763 
1764 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1765 			  unsigned int header_len);
1766 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1767 		     const struct tcp_md5sig_key *key);
1768 
1769 /* From tcp_fastopen.c */
1770 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1771 			    struct tcp_fastopen_cookie *cookie);
1772 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1773 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1774 			    u16 try_exp);
1775 struct tcp_fastopen_request {
1776 	/* Fast Open cookie. Size 0 means a cookie request */
1777 	struct tcp_fastopen_cookie	cookie;
1778 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1779 	size_t				size;
1780 	int				copied;	/* queued in tcp_connect() */
1781 	struct ubuf_info		*uarg;
1782 };
1783 void tcp_free_fastopen_req(struct tcp_sock *tp);
1784 void tcp_fastopen_destroy_cipher(struct sock *sk);
1785 void tcp_fastopen_ctx_destroy(struct net *net);
1786 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1787 			      void *primary_key, void *backup_key);
1788 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1789 			    u64 *key);
1790 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1791 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1792 			      struct request_sock *req,
1793 			      struct tcp_fastopen_cookie *foc,
1794 			      const struct dst_entry *dst);
1795 void tcp_fastopen_init_key_once(struct net *net);
1796 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1797 			     struct tcp_fastopen_cookie *cookie);
1798 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1799 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1800 #define TCP_FASTOPEN_KEY_MAX 2
1801 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
1802 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1803 
1804 /* Fastopen key context */
1805 struct tcp_fastopen_context {
1806 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1807 	int		num;
1808 	struct rcu_head	rcu;
1809 };
1810 
1811 void tcp_fastopen_active_disable(struct sock *sk);
1812 bool tcp_fastopen_active_should_disable(struct sock *sk);
1813 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1814 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1815 
1816 /* Caller needs to wrap with rcu_read_(un)lock() */
1817 static inline
1818 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1819 {
1820 	struct tcp_fastopen_context *ctx;
1821 
1822 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1823 	if (!ctx)
1824 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1825 	return ctx;
1826 }
1827 
1828 static inline
1829 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1830 			       const struct tcp_fastopen_cookie *orig)
1831 {
1832 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1833 	    orig->len == foc->len &&
1834 	    !memcmp(orig->val, foc->val, foc->len))
1835 		return true;
1836 	return false;
1837 }
1838 
1839 static inline
1840 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1841 {
1842 	return ctx->num;
1843 }
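
/* Illustrative sketch, not part of the original header: the RCU
 * discipline requested by the comment above tcp_fastopen_get_ctx().
 * The hypothetical helper only shows how a reader would grab the
 * context and copy what it needs before dropping the RCU read lock.
 */
static inline int tcp_example_fastopen_key_count(const struct sock *sk)
{
	const struct tcp_fastopen_context *ctx;
	int num = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		num = tcp_fastopen_context_len(ctx);
	rcu_read_unlock();
	return num;
}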
1844 
1845 /* Latencies incurred by various limits for a sender. They are
1846  * chronograph-like stats that are mutually exclusive.
1847  */
1848 enum tcp_chrono {
1849 	TCP_CHRONO_UNSPEC,
1850 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1851 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1852 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1853 	__TCP_CHRONO_MAX,
1854 };
1855 
1856 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1857 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1858 
1859 /* This helper is needed, because skb->tcp_tsorted_anchor uses
1860  * the same memory storage as skb->destructor/_skb_refdst
1861  */
1862 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1863 {
1864 	skb->destructor = NULL;
1865 	skb->_skb_refdst = 0UL;
1866 }
1867 
1868 #define tcp_skb_tsorted_save(skb) {		\
1869 	unsigned long _save = skb->_skb_refdst;	\
1870 	skb->_skb_refdst = 0UL;
1871 
1872 #define tcp_skb_tsorted_restore(skb)		\
1873 	skb->_skb_refdst = _save;		\
1874 }
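
/* Illustrative note, not in the original header: the two macros above
 * open and close a compound statement, so they must be used as a
 * bracketed pair around code that would otherwise clobber
 * skb->_skb_refdst, e.g. (hypothetical caller):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = some_transmit_helper(sk, skb);
 *	} tcp_skb_tsorted_restore(skb);
 */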
1875 
1876 void tcp_write_queue_purge(struct sock *sk);
1877 
1878 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1879 {
1880 	return skb_rb_first(&sk->tcp_rtx_queue);
1881 }
1882 
1883 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1884 {
1885 	return skb_rb_last(&sk->tcp_rtx_queue);
1886 }
1887 
1888 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1889 {
1890 	return skb_peek_tail(&sk->sk_write_queue);
1891 }
1892 
1893 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1894 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1895 
1896 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1897 {
1898 	return skb_peek(&sk->sk_write_queue);
1899 }
1900 
1901 static inline bool tcp_skb_is_last(const struct sock *sk,
1902 				   const struct sk_buff *skb)
1903 {
1904 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1905 }
1906 
1907 /**
1908  * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
1909  * @sk: socket
1910  *
1911  * Since the write queue can have a temporarily empty skb in it,
1912  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1913  */
1914 static inline bool tcp_write_queue_empty(const struct sock *sk)
1915 {
1916 	const struct tcp_sock *tp = tcp_sk(sk);
1917 
1918 	return tp->write_seq == tp->snd_nxt;
1919 }
1920 
1921 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1922 {
1923 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1924 }
1925 
1926 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1927 {
1928 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1929 }
1930 
1931 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1932 {
1933 	__skb_queue_tail(&sk->sk_write_queue, skb);
1934 
1935 	/* Queue it, remembering where we must start sending. */
1936 	if (sk->sk_write_queue.next == skb)
1937 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1938 }
1939 
1940 /* Insert new before skb on the write queue of sk.  */
1941 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1942 						  struct sk_buff *skb,
1943 						  struct sock *sk)
1944 {
1945 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1946 }
1947 
1948 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1949 {
1950 	tcp_skb_tsorted_anchor_cleanup(skb);
1951 	__skb_unlink(skb, &sk->sk_write_queue);
1952 }
1953 
1954 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1955 
1956 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1957 {
1958 	tcp_skb_tsorted_anchor_cleanup(skb);
1959 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1960 }
1961 
1962 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1963 {
1964 	list_del(&skb->tcp_tsorted_anchor);
1965 	tcp_rtx_queue_unlink(skb, sk);
1966 	tcp_wmem_free_skb(sk, skb);
1967 }
1968 
1969 static inline void tcp_push_pending_frames(struct sock *sk)
1970 {
1971 	if (tcp_send_head(sk)) {
1972 		struct tcp_sock *tp = tcp_sk(sk);
1973 
1974 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1975 	}
1976 }
1977 
1978 /* Start sequence of the skb just after the highest skb with the SACKed
1979  * bit set, valid only if sacked_out > 0 or when the caller has ensured
1980  * validity by itself.
1981  */
1982 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1983 {
1984 	if (!tp->sacked_out)
1985 		return tp->snd_una;
1986 
1987 	if (tp->highest_sack == NULL)
1988 		return tp->snd_nxt;
1989 
1990 	return TCP_SKB_CB(tp->highest_sack)->seq;
1991 }
1992 
1993 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1994 {
1995 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1996 }
1997 
1998 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1999 {
2000 	return tcp_sk(sk)->highest_sack;
2001 }
2002 
2003 static inline void tcp_highest_sack_reset(struct sock *sk)
2004 {
2005 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2006 }
2007 
2008 /* Called when old skb is about to be deleted and replaced by new skb */
2009 static inline void tcp_highest_sack_replace(struct sock *sk,
2010 					    struct sk_buff *old,
2011 					    struct sk_buff *new)
2012 {
2013 	if (old == tcp_highest_sack(sk))
2014 		tcp_sk(sk)->highest_sack = new;
2015 }
2016 
2017 /* This helper checks if socket has IP_TRANSPARENT set */
2018 static inline bool inet_sk_transparent(const struct sock *sk)
2019 {
2020 	switch (sk->sk_state) {
2021 	case TCP_TIME_WAIT:
2022 		return inet_twsk(sk)->tw_transparent;
2023 	case TCP_NEW_SYN_RECV:
2024 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2025 	}
2026 	return inet_sk(sk)->transparent;
2027 }
2028 
2029 /* Determines whether this is a thin stream (which may suffer from
2030  * increased latency). Used to trigger latency-reducing mechanisms.
2031  */
2032 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2033 {
2034 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2035 }
2036 
2037 /* /proc */
2038 enum tcp_seq_states {
2039 	TCP_SEQ_STATE_LISTENING,
2040 	TCP_SEQ_STATE_ESTABLISHED,
2041 };
2042 
2043 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2044 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2045 void tcp_seq_stop(struct seq_file *seq, void *v);
2046 
2047 struct tcp_seq_afinfo {
2048 	sa_family_t			family;
2049 };
2050 
2051 struct tcp_iter_state {
2052 	struct seq_net_private	p;
2053 	enum tcp_seq_states	state;
2054 	struct sock		*syn_wait_sk;
2055 	int			bucket, offset, sbucket, num;
2056 	loff_t			last_pos;
2057 };
2058 
2059 extern struct request_sock_ops tcp_request_sock_ops;
2060 extern struct request_sock_ops tcp6_request_sock_ops;
2061 
2062 void tcp_v4_destroy_sock(struct sock *sk);
2063 
2064 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2065 				netdev_features_t features);
2066 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
2067 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2068 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2069 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2070 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2071 void tcp_gro_complete(struct sk_buff *skb);
2072 
2073 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2074 
2075 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2076 {
2077 	struct net *net = sock_net((struct sock *)tp);
2078 	u32 val;
2079 
2080 	val = READ_ONCE(tp->notsent_lowat);
2081 
2082 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2083 }
2084 
2085 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2086 
2087 #ifdef CONFIG_PROC_FS
2088 int tcp4_proc_init(void);
2089 void tcp4_proc_exit(void);
2090 #endif
2091 
2092 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2093 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2094 		     const struct tcp_request_sock_ops *af_ops,
2095 		     struct sock *sk, struct sk_buff *skb);
2096 
2097 /* TCP af-specific functions */
2098 struct tcp_sock_af_ops {
2099 #ifdef CONFIG_TCP_MD5SIG
2100 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2101 						const struct sock *addr_sk);
2102 	int		(*calc_md5_hash)(char *location,
2103 					 const struct tcp_md5sig_key *md5,
2104 					 const struct sock *sk,
2105 					 const struct sk_buff *skb);
2106 	int		(*md5_parse)(struct sock *sk,
2107 				     int optname,
2108 				     sockptr_t optval,
2109 				     int optlen);
2110 #endif
2111 };
2112 
2113 struct tcp_request_sock_ops {
2114 	u16 mss_clamp;
2115 #ifdef CONFIG_TCP_MD5SIG
2116 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2117 						 const struct sock *addr_sk);
2118 	int		(*calc_md5_hash) (char *location,
2119 					  const struct tcp_md5sig_key *md5,
2120 					  const struct sock *sk,
2121 					  const struct sk_buff *skb);
2122 #endif
2123 #ifdef CONFIG_SYN_COOKIES
2124 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2125 				 __u16 *mss);
2126 #endif
2127 	struct dst_entry *(*route_req)(const struct sock *sk,
2128 				       struct sk_buff *skb,
2129 				       struct flowi *fl,
2130 				       struct request_sock *req);
2131 	u32 (*init_seq)(const struct sk_buff *skb);
2132 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2133 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2134 			   struct flowi *fl, struct request_sock *req,
2135 			   struct tcp_fastopen_cookie *foc,
2136 			   enum tcp_synack_type synack_type,
2137 			   struct sk_buff *syn_skb);
2138 };
2139 
2140 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2141 #if IS_ENABLED(CONFIG_IPV6)
2142 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2143 #endif
2144 
2145 #ifdef CONFIG_SYN_COOKIES
2146 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2147 					 const struct sock *sk, struct sk_buff *skb,
2148 					 __u16 *mss)
2149 {
2150 	tcp_synq_overflow(sk);
2151 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2152 	return ops->cookie_init_seq(skb, mss);
2153 }
2154 #else
2155 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2156 					 const struct sock *sk, struct sk_buff *skb,
2157 					 __u16 *mss)
2158 {
2159 	return 0;
2160 }
2161 #endif
2162 
2163 int tcpv4_offload_init(void);
2164 
2165 void tcp_v4_init(void);
2166 void tcp_init(void);
2167 
2168 /* tcp_recovery.c */
2169 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2170 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2171 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2172 				u32 reo_wnd);
2173 extern bool tcp_rack_mark_lost(struct sock *sk);
2174 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2175 			     u64 xmit_time);
2176 extern void tcp_rack_reo_timeout(struct sock *sk);
2177 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2178 
2179 /* tcp_plb.c */
2180 
2181 /*
2182  * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2183  * expects cong_ratio which represents fraction of traffic that experienced
2184  * congestion over a single RTT. In order to avoid floating point operations,
2185  * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2186  */
2187 #define TCP_PLB_SCALE 8
2188 
2189 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2190 struct tcp_plb_state {
2191 	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2192 		unused:3;
2193 	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2194 };
2195 
2196 static inline void tcp_plb_init(const struct sock *sk,
2197 				struct tcp_plb_state *plb)
2198 {
2199 	plb->consec_cong_rounds = 0;
2200 	plb->pause_until = 0;
2201 }
2202 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2203 			  const int cong_ratio);
2204 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2205 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2206 
2207 /* At how many usecs into the future should the RTO fire? */
2208 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2209 {
2210 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2211 	u32 rto = inet_csk(sk)->icsk_rto;
2212 	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2213 
2214 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2215 }
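
/* Illustrative sketch, not part of the original header: how a caller
 * might turn the signed delta above into a timer argument, loosely
 * modelled on tcp_rearm_rto().  The helper and its clamping are
 * hypothetical and only demonstrate that a non-positive delta means
 * the timer is already overdue.
 */
static inline unsigned long tcp_example_rto_in_jiffies(const struct sock *sk)
{
	s64 delta_us = tcp_rto_delta_us(sk);

	if (delta_us <= 0)
		return 1;	/* overdue: fire as soon as possible */
	return usecs_to_jiffies(min_t(s64, delta_us, UINT_MAX));
}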
2216 
2217 /*
2218  * Save and compile IPv4 options and return a pointer to them
2219  */
2220 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2221 							 struct sk_buff *skb)
2222 {
2223 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2224 	struct ip_options_rcu *dopt = NULL;
2225 
2226 	if (opt->optlen) {
2227 		int opt_size = sizeof(*dopt) + opt->optlen;
2228 
2229 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2230 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2231 			kfree(dopt);
2232 			dopt = NULL;
2233 		}
2234 	}
2235 	return dopt;
2236 }
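
/* Illustrative sketch, not part of the original header: the blob
 * returned by tcp_v4_save_options() is allocated with GFP_ATOMIC (or is
 * NULL) and is owned by the caller, who typically hands it to a request
 * sock or frees it.  Hypothetical caller showing the matching kfree():
 */
static inline void tcp_example_save_then_drop_options(struct net *net,
						      struct sk_buff *skb)
{
	struct ip_options_rcu *opt = tcp_v4_save_options(net, skb);

	/* ... inspect opt here if needed; kfree(NULL) is a no-op. */
	kfree(opt);
}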
2237 
2238 /* Locally generated TCP pure ACKs have skb->truesize == 2
2239  * (see tcp_send_ack() in net/ipv4/tcp_output.c).
2240  * Checking this is much faster than dissecting the packet to find out.
2241  * (Think of GRE encapsulations, IPv4, IPv6, ...)
2242  */
2243 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2244 {
2245 	return skb->truesize == 2;
2246 }
2247 
2248 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2249 {
2250 	skb->truesize = 2;
2251 }
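
/* Illustrative note, not in the original header: the value 2 set by
 * skb_set_tcp_pure_ack() is simply a marker that skb_is_tcp_pure_ack()
 * tests for; no real packet ever has a truesize that small, so the
 * check cannot mistake a data-bearing skb for a pure ACK.
 */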
2252 
2253 static inline int tcp_inq(struct sock *sk)
2254 {
2255 	struct tcp_sock *tp = tcp_sk(sk);
2256 	int answ;
2257 
2258 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2259 		answ = 0;
2260 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2261 		   !tp->urg_data ||
2262 		   before(tp->urg_seq, tp->copied_seq) ||
2263 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2264 
2265 		answ = tp->rcv_nxt - tp->copied_seq;
2266 
2267 		/* Subtract 1, if FIN was received */
2268 		if (answ && sock_flag(sk, SOCK_DONE))
2269 			answ--;
2270 	} else {
2271 		answ = tp->urg_seq - tp->copied_seq;
2272 	}
2273 
2274 	return answ;
2275 }
2276 
2277 int tcp_peek_len(struct socket *sock);
2278 
2279 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2280 {
2281 	u16 segs_in;
2282 
2283 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2284 
2285 	/* We update these fields while other threads might
2286 	 * read them from tcp_get_info()
2287 	 */
2288 	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2289 	if (skb->len > tcp_hdrlen(skb))
2290 		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2291 }
2292 
2293 /*
2294  * TCP listen path runs lockless.
2295  * We forced "struct sock" to be const qualified to make sure
2296  * we don't modify one of its fields by mistake.
2297  * Here, we increment sk_drops which is an atomic_t, so we can safely
2298  * make sock writable again.
2299  */
2300 static inline void tcp_listendrop(const struct sock *sk)
2301 {
2302 	atomic_inc(&((struct sock *)sk)->sk_drops);
2303 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2304 }
2305 
2306 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2307 
2308 /*
2309  * Interface for adding Upper Level Protocols over TCP
2310  */
2311 
2312 #define TCP_ULP_NAME_MAX	16
2313 #define TCP_ULP_MAX		128
2314 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2315 
2316 struct tcp_ulp_ops {
2317 	struct list_head	list;
2318 
2319 	/* initialize ulp */
2320 	int (*init)(struct sock *sk);
2321 	/* update ulp */
2322 	void (*update)(struct sock *sk, struct proto *p,
2323 		       void (*write_space)(struct sock *sk));
2324 	/* cleanup ulp */
2325 	void (*release)(struct sock *sk);
2326 	/* diagnostic */
2327 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2328 	size_t (*get_info_size)(const struct sock *sk);
2329 	/* clone ulp */
2330 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2331 		      const gfp_t priority);
2332 
2333 	char		name[TCP_ULP_NAME_MAX];
2334 	struct module	*owner;
2335 };
2336 int tcp_register_ulp(struct tcp_ulp_ops *type);
2337 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2338 int tcp_set_ulp(struct sock *sk, const char *name);
2339 void tcp_get_available_ulp(char *buf, size_t len);
2340 void tcp_cleanup_ulp(struct sock *sk);
2341 void tcp_update_ulp(struct sock *sk, struct proto *p,
2342 		    void (*write_space)(struct sock *sk));
2343 
2344 #define MODULE_ALIAS_TCP_ULP(name)				\
2345 	__MODULE_INFO(alias, alias_userspace, name);		\
2346 	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
2347 
2348 #ifdef CONFIG_NET_SOCK_MSG
2349 struct sk_msg;
2350 struct sk_psock;
2351 
2352 #ifdef CONFIG_BPF_SYSCALL
2353 struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2354 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2355 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2356 #endif /* CONFIG_BPF_SYSCALL */
2357 
2358 #ifdef CONFIG_INET
2359 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2360 #else
2361 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2362 {
2363 }
2364 #endif
2365 
2366 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2367 			  struct sk_msg *msg, u32 bytes, int flags);
2368 #endif /* CONFIG_NET_SOCK_MSG */
2369 
2370 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2371 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2372 {
2373 }
2374 #endif
2375 
2376 #ifdef CONFIG_CGROUP_BPF
2377 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2378 				      struct sk_buff *skb,
2379 				      unsigned int end_offset)
2380 {
2381 	skops->skb = skb;
2382 	skops->skb_data_end = skb->data + end_offset;
2383 }
2384 #else
2385 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2386 				      struct sk_buff *skb,
2387 				      unsigned int end_offset)
2388 {
2389 }
2390 #endif
2391 
2392 /* Call a BPF_SOCK_OPS program that returns an int. If the return value
2393  * is < 0, then the BPF op failed (for example if the loaded BPF
2394  * program does not support the chosen operation or there is no BPF
2395  * program loaded).
2396  */
2397 #ifdef CONFIG_BPF
2398 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2399 {
2400 	struct bpf_sock_ops_kern sock_ops;
2401 	int ret;
2402 
2403 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2404 	if (sk_fullsock(sk)) {
2405 		sock_ops.is_fullsock = 1;
2406 		sock_owned_by_me(sk);
2407 	}
2408 
2409 	sock_ops.sk = sk;
2410 	sock_ops.op = op;
2411 	if (nargs > 0)
2412 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2413 
2414 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2415 	if (ret == 0)
2416 		ret = sock_ops.reply;
2417 	else
2418 		ret = -1;
2419 	return ret;
2420 }
2421 
2422 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2423 {
2424 	u32 args[2] = {arg1, arg2};
2425 
2426 	return tcp_call_bpf(sk, op, 2, args);
2427 }
2428 
2429 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2430 				    u32 arg3)
2431 {
2432 	u32 args[3] = {arg1, arg2, arg3};
2433 
2434 	return tcp_call_bpf(sk, op, 3, args);
2435 }
2436 
2437 #else
2438 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2439 {
2440 	return -EPERM;
2441 }
2442 
2443 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2444 {
2445 	return -EPERM;
2446 }
2447 
2448 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2449 				    u32 arg3)
2450 {
2451 	return -EPERM;
2452 }
2453 
2454 #endif
2455 
2456 static inline u32 tcp_timeout_init(struct sock *sk)
2457 {
2458 	int timeout;
2459 
2460 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2461 
2462 	if (timeout <= 0)
2463 		timeout = TCP_TIMEOUT_INIT;
2464 	return min_t(int, timeout, TCP_RTO_MAX);
2465 }
2466 
2467 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2468 {
2469 	int rwnd;
2470 
2471 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2472 
2473 	if (rwnd < 0)
2474 		rwnd = 0;
2475 	return rwnd;
2476 }
2477 
2478 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2479 {
2480 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2481 }
2482 
2483 static inline void tcp_bpf_rtt(struct sock *sk)
2484 {
2485 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2486 		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2487 }
2488 
2489 #if IS_ENABLED(CONFIG_SMC)
2490 extern struct static_key_false tcp_have_smc;
2491 #endif
2492 
2493 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2494 void clean_acked_data_enable(struct inet_connection_sock *icsk,
2495 			     void (*cad)(struct sock *sk, u32 ack_seq));
2496 void clean_acked_data_disable(struct inet_connection_sock *icsk);
2497 void clean_acked_data_flush(void);
2498 #endif
2499 
2500 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2501 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2502 				    const struct tcp_sock *tp)
2503 {
2504 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2505 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2506 }
2507 
2508 /* Compute Earliest Departure Time for some control packets
2509  * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
2510  */
2511 static inline u64 tcp_transmit_time(const struct sock *sk)
2512 {
2513 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2514 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2515 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2516 
2517 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2518 	}
2519 	return 0;
2520 }
2521 
2522 #endif	/* _TCP_H */
2523