/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 *		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct inet_bind2_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 */
struct inet_connection_sock_af_ops {
	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
	int	    (*rebuild_header)(struct sock *sk);
	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req,
				      struct dst_entry *dst,
				      struct request_sock *req_unhash,
				      bool *own_req);
	u16	    net_header_len;
	u16	    net_frag_header_len;
	u16	    sockaddr_len;
	int	    (*setsockopt)(struct sock *sk, int level, int optname,
				  sockptr_t optval, unsigned int optlen);
	int	    (*getsockopt)(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen);
	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
	void	    (*mtu_reduced)(struct sock *sk);
};
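
/*
 * Example: each address family provides one of these tables and points
 * icsk_af_ops at it. A minimal sketch, loosely following the TCP/IPv4
 * table (ipv4_specific in net/ipv4/tcp_ipv4.c); exact members vary
 * between kernel versions:
 *
 *	static const struct inet_connection_sock_af_ops my_af_ops = {
 *		.queue_xmit	= ip_queue_xmit,
 *		.send_check	= tcp_v4_send_check,
 *		.conn_request	= tcp_v4_conn_request,
 *		.syn_recv_sock	= tcp_v4_syn_recv_sock,
 *		.net_header_len	= sizeof(struct iphdr),
 *		.sockaddr_len	= sizeof(struct sockaddr_in),
 *	};
 *
 *	inet_csk(sk)->icsk_af_ops = &my_af_ops;
 */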

/**
 * struct inet_connection_sock - INET connection oriented sock
 *
 * @icsk_accept_queue:	   FIFO of established children
 * @icsk_bind_hash:	   Bind node
 * @icsk_bind2_hash:	   Bind node in the bhash2 table
 * @icsk_timeout:	   Timeout
 * @icsk_retransmit_timer: Resend (no ack)
 * @icsk_delack_timer:	   Delayed ACK timer
 * @icsk_rto:		   Retransmit timeout
 * @icsk_rto_min:	   Minimum retransmit timeout
 * @icsk_delack_max:	   Maximum delayed ACK timeout
 * @icsk_pmtu_cookie:	   Last pmtu seen by socket
 * @icsk_ca_ops:	   Pluggable congestion control hook
 * @icsk_af_ops:	   Operations which are AF_INET{4,6} specific
 * @icsk_ulp_ops:	   Pluggable ULP control hook
 * @icsk_ulp_data:	   ULP private data
 * @icsk_clean_acked:	   Clean acked data hook
 * @icsk_sync_mss:	   Hook that synchronizes the MSS with the path MTU
 * @icsk_ca_state:	   Congestion control state
 * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
 * @icsk_pending:	   Scheduled timer event
 * @icsk_backoff:	   Backoff
 * @icsk_syn_retries:	   Number of allowed SYN (or equivalent) retries
 * @icsk_probes_out:	   Unanswered zero window probes
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup:		   MTU probing control data
 * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
 * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	struct inet_bind2_bucket  *icsk_bind2_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u32			  icsk_rto_min;
	__u32			  icsk_delack_max;
	__u32			  icsk_pmtu_cookie;
	const struct tcp_congestion_ops *icsk_ca_ops;
	const struct inet_connection_sock_af_ops *icsk_af_ops;
	const struct tcp_ulp_ops  *icsk_ulp_ops;
	void __rcu		  *icsk_ulp_data;
	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
	__u8			  icsk_ca_state:5,
				  icsk_ca_initialized:1,
				  icsk_ca_setsockopt:1,
				  icsk_ca_dst_locked:1;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	__u8			  icsk_probes_out;
	__u16			  icsk_ext_hdr_len;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  retry;	 /* Number of attempts			   */
		__u32		  ato;		 /* Predicted tick of soft clock	   */
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
	struct {
		/* Range of MTUs to search */
		int		  search_high;
		int		  search_low;

		/* Information on the current probe. */
		u32		  probe_size:31,
		/* Is the MTUP feature enabled for this connection? */
				  enabled:1;

		u32		  probe_timestamp;
	} icsk_mtup;
	u32			  icsk_probes_tstamp;
	u32			  icsk_user_timeout;

	u64			  icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE	  sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};

#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
#define ICSK_TIME_DACK		2	/* Delayed ack timer */
#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
#define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
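
/*
 * Example: a congestion control module keeps its per-connection state
 * inside icsk_ca_priv, accessed via inet_csk_ca(). A minimal sketch
 * (struct my_ca_state and my_ca_init are illustrative, not a real
 * module); the BUILD_BUG_ON mirrors the size check real modules
 * perform against ICSK_CA_PRIV_SIZE:
 *
 *	struct my_ca_state {
 *		u32 cnt;
 *		u32 last_max_cwnd;
 *	};
 *
 *	static void my_ca_init(struct sock *sk)
 *	{
 *		struct my_ca_state *ca = inet_csk_ca(sk);
 *
 *		BUILD_BUG_ON(sizeof(struct my_ca_state) > ICSK_CA_PRIV_SIZE);
 *		ca->cnt = 0;
 *		ca->last_max_cwnd = 0;
 *	}
 */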

struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority);

enum inet_csk_ack_state_t {
	ICSK_ACK_SCHED	= 1,
	ICSK_ACK_TIMER	= 2,
	ICSK_ACK_PUSHED	= 4,
	ICSK_ACK_PUSHED2 = 8,
	ICSK_ACK_NOW = 16,	/* Send the next ACK immediately (once) */
	ICSK_ACK_NOMEM = 32,
};

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *),
			       void (*delack_handler)(struct timer_list *),
			       void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
void inet_csk_clear_xmit_timers_sync(struct sock *sk);

static inline void inet_csk_schedule_ack(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline int inet_csk_ack_scheduled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline void inet_csk_delack_init(struct sock *sk)
{
	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);

static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
		icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending = 0;
		icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}

/*
 *	Reset the retransmission timer
 */
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
					     unsigned long when,
					     const unsigned long max_when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (when > max_when) {
		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, (void *)_THIS_IP_);
		when = max_when;
	}

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
		icsk->icsk_pending = what;
		icsk->icsk_timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
		icsk->icsk_ack.timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}
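
/*
 * Example: arming the retransmit timer for one RTO, capped at the
 * protocol's maximum. This mirrors the typical TCP call site (the
 * TCP_RTO_MAX bound is defined in net/tcp.h):
 *
 *	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 *				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 */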

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
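
/*
 * Worked example: with icsk_rto equivalent to 200ms and
 * icsk_backoff == 3, the shift yields 200ms << 3 == 1.6s. The 64-bit
 * intermediate and the min_t() clamp keep a large backoff from
 * overflowing or exceeding max_when.
 */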

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child);
bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req,
					 bool own_req);

static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout, max_timeout);
}

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	this_cpu_inc(*sk->sk_prot->orphan_count);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
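
/*
 * Example: a protocol's error path for a freshly cloned, not yet
 * hashed child socket. A sketch of the pattern TCP uses (see the
 * tcp_v4_syn_recv_sock() error path):
 *
 *	inet_csk_prepare_forced_close(newsk);
 *	tcp_done(newsk);
 */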

/*
 * LISTEN is a special case for poll.
 */
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
			(EPOLLIN | EPOLLRDNORM) : 0;
}
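
/*
 * Example: a connection oriented protocol can report readability on a
 * listening socket from its poll handler. A minimal sketch (my_poll is
 * hypothetical; the shape follows dccp_poll()/tcp_poll()):
 *
 *	static __poll_t my_poll(struct file *file, struct socket *sock,
 *				poll_table *wait)
 *	{
 *		struct sock *sk = sock->sk;
 *
 *		if (sk->sk_state == TCP_LISTEN)
 *			return inet_csk_listen_poll(sk);
 *		...
 *	}
 */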

int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);

/* Update the fast reuse flag when adding a socket. */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

#define TCP_PINGPONG_THRESH	1

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
}

static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
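
/*
 * Example: pingpong mode tags a session as interactive, biasing TCP
 * toward delayed ACKs that can piggyback on response data. A sketch of
 * the typical use: enter the mode when the socket sends data shortly
 * after receiving some, and test it on the ACK-sending path:
 *
 *	if (inet_csk_in_pingpong_mode(sk))
 *		// delay this ACK; it will likely ride on outgoing data
 */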

static inline bool inet_csk_has_ulp(const struct sock *sk)
{
	return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
}

static inline void inet_init_csk_locks(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
	spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
}

#endif /* _INET_CONNECTION_SOCK_H */