/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC 1122 says the minimum is 100 sec;
				 * 15 retries is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
					                 * for local resources.
					                 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * to (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the TIME-WAIT state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32-bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)
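
/* Illustrative example (not in the original source): sequence-space
 * comparison survives 32-bit wraparound because the subtraction is
 * reduced mod 2^32 before the signed test, e.g.
 *
 *	before(0xfffffff0, 0x00000010) == 1
 *
 * since (__s32)(0xfffffff0 - 0x10) == (__s32)0xffffffe0 < 0.
 */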

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
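
/* Illustrative example (not in the original source): between() also
 * works across wraparound, e.g. between(5, 0xfffffffe, 10) is true,
 * because 10 - 0xfffffffe == 12 and 5 - 0xfffffffe == 7 (mod 2^32),
 * and 12 >= 7.
 */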

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void			tcp_v4_err(struct sk_buff *skb, u32);

extern void			tcp_shutdown(struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb);

extern int			tcp_v4_remember_stamp(struct sock *sk);

extern int			tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int			tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
					    struct msghdr *msg, size_t size);
extern ssize_t			tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int			tcp_ioctl(struct sock *sk,
					  int cmd,
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk,
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk,
						    struct sk_buff *skb,
						    struct tcphdr *th,
						    unsigned len);

extern void			tcp_rcv_space_adjust(struct sock *sk);

extern void			tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int			tcp_twsk_unique(struct sock *sk,
						struct sock *sktw, void *twp);

extern void			tcp_twsk_destructor(struct sock *sk);

extern ssize_t			tcp_splice_read(struct socket *sk, loff_t *ppos,
						struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status	tcp_timewait_state_process(struct inet_timewait_sock *tw,
							   struct sk_buff *skb,
							   const struct tcphdr *th);

extern struct sock *		tcp_check_req(struct sock *sk, struct sk_buff *skb,
					      struct request_sock *req,
					      struct request_sock **prev);
extern int			tcp_child_process(struct sock *parent,
						  struct sock *child,
						  struct sk_buff *skb);
extern int			tcp_use_frto(struct sock *sk);
extern void			tcp_enter_frto(struct sock *sk);
extern void			tcp_enter_loss(struct sock *sk, int how);
extern void			tcp_clear_retrans(struct tcp_sock *tp);
extern void			tcp_update_metrics(struct sock *sk);

extern void			tcp_close(struct sock *sk,
					  long timeout);
extern unsigned int		tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern int			tcp_getsockopt(struct sock *sk, int level,
					       int optname,
					       char __user *optval,
					       int __user *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level,
					       int optname, char __user *optval,
					       unsigned int optlen);
extern int			compat_tcp_getsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, int __user *optlen);
extern int			compat_tcp_setsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, unsigned int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
					    struct msghdr *msg,
					    size_t len, int nonblock,
					    int flags, int *addr_len);

extern void			tcp_parse_options(struct sk_buff *skb,
						  struct tcp_options_received *opt_rx,
						  int estab);

extern u8			*tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void			tcp_v4_send_check(struct sock *sk, int len,
						  struct sk_buff *skb);

extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct request_sock *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct request_sock *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern int			tcp_connect(struct sock *sk);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct request_sock *req);

extern int			tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int  tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int  tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
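
/* Illustrative example (not in the original source): with a peer that
 * has ever advertised at most max_window == 65535, a 40000-byte TSO
 * chunk is clamped to 65535 >> 1 == 32767 bytes, so a single packet
 * never consumes more than half of the best-ever receive window.
 */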

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
				unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
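
/* Note (not in the original source): tp->srtt is kept scaled by 8 and
 * tp->rttvar already carries the 4 * RTTVAR weighting, so the line
 * above is effectively the RFC 2988 rule RTO = SRTT + 4 * RTTVAR,
 * expressed in jiffies.
 */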

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
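
/* Note (not in the original source): pred_flags mirrors the 4th 32-bit
 * word of an expected incoming header, so header prediction is a single
 * compare: tcp_header_len << 26 places the data offset in its doff
 * position (header_len / 4 shifted to bits 28-31), the ACK flag is set,
 * and the low 16 bits hold the unscaled window we expect the peer to
 * advertise.
 */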

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window allows.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
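
/* Illustrative example (not in the original source): if the last window
 * update was sent at rcv_wup == 1000 with rcv_wnd == 4096, and we have
 * since consumed sequence space up to rcv_nxt == 3000, the peer may
 * still send 1000 + 4096 - 3000 == 2096 bytes under the old advert.
 */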

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits, which causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers here.
 * This is 36 bytes on 32-bit architectures and 40 bytes on
 * 64-bit machines; if this grows, please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
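
/* Illustrative sketch (not in the original source): a minimal Reno-like
 * module would fill in the two required hooks, reusing the Reno helpers
 * declared above, and register itself:
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 * tcp_unregister_congestion_control() undoes this on module exit.
 */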

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves with respect
 * to SACK handling. SACK is negotiated with the peer, and therefore it
 * can vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
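
/* Note (not in the original source): (cwnd >> 1) + (cwnd >> 2) is
 * 3/4 * cwnd, so outside CWR/Recovery the reported ssthresh is at
 * least three quarters of the current window, e.g. cwnd == 40 yields
 * max(snd_ssthresh, 30).
 */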

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".  This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate (or verify) the TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
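
/* Illustrative example (not in the original source): with
 * sysctl_tcp_adv_win_scale == 2, a 64 KB buffer yields a window of
 * 65536 - 65536/4 == 49152 bytes; the reserved quarter covers per-skb
 * overhead. A negative scale would instead advertise only
 * space >> -scale.
 */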

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

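/* Note (not in the original source): the helpers below use the GNU
 * "x ? : y" extension, which evaluates to the per-socket setting when
 * it is nonzero and falls back to the system-wide sysctl otherwise.
 */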
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
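
/* Note (not in the original source): (rto << 2) - (rto >> 1) is
 * 3.5 * RTO, so the FIN_WAIT2 lifetime is clamped to at least a few
 * retransmission timeouts, e.g. rto == HZ gives a 3.5-second floor.
 */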

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}
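
/* Illustrative example (not in the original source): with paws_win == 0,
 * a segment carrying rcv_tsval == 99 against ts_recent == 100 fails the
 * first test ((s32)1 > 0) and is only accepted if ts_recent_stamp is
 * older than TCP_PAWS_24DAYS, i.e. the peer's timestamp clock may have
 * wrapped since we last heard from it.
 */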

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   will not be reset. Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clocks
	   across reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int			tcp_v4_md5_hash_skb(char *md5_hash,
						    struct tcp_md5sig_key *key,
						    struct sock *sk,
						    struct request_sock *req,
						    struct sk_buff *skb);

extern struct tcp_md5sig_key	*tcp_v4_md5_lookup(struct sock *sk,
						   struct sock *addr_sk);

extern int			tcp_v4_md5_do_add(struct sock *sk,
						  __be32 addr,
						  u8 *newkey,
						  u8 newkeylen);

extern int			tcp_v4_md5_do_del(struct sock *sk,
						  __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ? 		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool	**tcp_alloc_md5sig_pool(struct sock *);
extern void			tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*__tcp_get_md5sig_pool(int cpu);
extern void			__tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool		*tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void		tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
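
/* Usage sketch (not in the original source): get_cpu() in
 * tcp_get_md5sig_pool() disables preemption, so the pool must be
 * released promptly on the same CPU, e.g.
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		tcp_md5_hash_key(hp, key);
 *		tcp_put_md5sig_pool();
 *	}
 *
 * On failure the get helper has already dropped the CPU reference,
 * so tcp_put_md5sig_pool() must not be called.
 */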

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

/* This function calculates a "timeout" which is equivalent to the timeout
 * of a TCP connection after "boundary" unsuccessful, exponentially
 * backed-off retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static inline bool retransmits_timed_out(const struct sock *sk,
					 unsigned int boundary)
{
	unsigned int timeout, linear_backoff_thresh;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);

	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}
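
/* Worked example (not in the original source): TCP_RTO_MAX/TCP_RTO_MIN
 * is 600, so linear_backoff_thresh = ilog2(600) = 9. With boundary == 15
 * (the TCP_RETR2 default) the deadline is
 * (2^10 - 1) * HZ/5 + (15 - 9) * 120*HZ ~= 205s + 720s ~= 15.4 minutes,
 * which matches the "~13-30min" note at TCP_RETR2 above.
 */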

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						  struct sk_buff *skb,
						  struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* Start sequence of the highest skb with the SACKed bit set, valid only
 * when sacked_out > 0 or when the caller has otherwise ensured validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int  tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */