/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2002-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>

#include <net/tcp.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>

/* "Be conservative in what you do,
    be liberal in what you accept from others."
    If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;

/* If it is set to zero, we disable picking up already established
   connections. */
static int nf_ct_tcp_loose __read_mostly = 1;

/* Max number of the retransmitted packets without receiving an (acceptable)
   ACK from the destination. If this number is reached, a shorter timer
   will be started. */
static int nf_ct_tcp_max_retrans __read_mostly = 3;

  /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
     closely.  They're more complex. --RR */

static const char *const tcp_conntrack_names[] = {
	"NONE",
	"SYN_SENT",
	"SYN_RECV",
	"ESTABLISHED",
	"FIN_WAIT",
	"CLOSE_WAIT",
	"LAST_ACK",
	"TIME_WAIT",
	"CLOSE",
	"SYN_SENT2",
};

#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
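
/* Illustrative note, not part of the original file: the macros above work by
 * plain textual juxtaposition, so an entry such as "2 MINS" in the timeout
 * table below expands to
 *
 *	2 MINS  ->  2 * 60 SECS  ->  2 * 60 * HZ
 *	5 DAYS  ->  5 * 24 * 60 * 60 * HZ
 *
 * i.e. a value in jiffies; with a hypothetical HZ of 100, "2 MINS" is
 * 12000 jiffies.
 */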

static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
   Linux uses 15 packets as limit, which corresponds
   to ~13-30min depending on RTO. */
	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
	[TCP_CONNTRACK_UNACK]		= 5 MINS,
};

#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE

/* What TCP flags are set from RST/SYN/FIN/ACK. */
enum tcp_bit_set {
	TCP_SYN_SET,
	TCP_SYNACK_SET,
	TCP_FIN_SET,
	TCP_ACK_SET,
	TCP_RST_SET,
	TCP_NONE_SET,
};

/*
 * The TCP state transition table needs a few words...
 *
 * We are the man in the middle. All the packets go through us
 * but might get lost in transit to the destination.
 * It is assumed that the destinations can't receive segments
 * we haven't seen.
 *
 * The checked segment is in window, but our windows are *not*
 * equivalent to those of the sender/receiver. We always
 * try to guess the state of the current sender.
 *
 * The meaning of the states is:
 *
 * NONE:	initial state
 * SYN_SENT:	SYN-only packet seen
 * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
 * SYN_RECV:	SYN-ACK packet seen
 * ESTABLISHED:	ACK packet seen
 * FIN_WAIT:	FIN packet seen
 * CLOSE_WAIT:	ACK seen (after FIN)
 * LAST_ACK:	FIN seen (after FIN)
 * TIME_WAIT:	last ACK seen
 * CLOSE:	closed connection (RST)
 *
 * Packets marked as IGNORED (sIG):
 *	they may be either invalid or valid,
 *	and the receiver may send back a connection
 *	closing RST or a SYN/ACK.
 *
 * Packets marked as INVALID (sIV):
 *	we regard them as truly invalid packets
 */
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
	{
/* ORIGINAL */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
 *	sNO -> sSS	Initialize a new connection
 *	sSS -> sSS	Retransmitted SYN
 *	sS2 -> sS2	Late retransmitted SYN
 *	sSR -> sIG
 *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
 *			are errors. Receiver will reply with RST
 *			and close the connection.
 *			Or we are not in sync and hold a dead connection.
 *	sFW -> sIG
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sSS	Reopened connection (RFC 1122).
 *	sCL -> sSS
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
 *	sNO -> sIV	Too late and no reason to do anything
 *	sSS -> sIV	Client can't send SYN and then SYN/ACK
 *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
 *	sSR -> sSR	Late retransmitted SYN/ACK in simultaneous open
 *	sES -> sIV	Invalid SYN/ACK packets sent by the client
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sIV
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sNO -> sIV	Too late and no reason to do anything...
 *	sSS -> sIV	Client might not send a FIN in this state:
 *			we enforce waiting for a SYN/ACK reply first.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions, waiting for
 *			the last ACK.
 *			Might be a retransmitted FIN as well...
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
 *	sNO -> sES	Assumed.
 *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
 *	sS2 -> sIV
 *	sSR -> sES	Established state is reached.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	},
	{
/* REPLY */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*
 *	sNO -> sIV	Never reached.
 *	sSS -> sS2	Simultaneous open
 *	sS2 -> sS2	Retransmitted simultaneous SYN
 *	sSR -> sIV	Invalid SYN packets sent by the server
 *	sES -> sIV
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sSS	Reopened connection, but server may have switched role
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
 *	sSS -> sSR	Standard open.
 *	sS2 -> sSR	Simultaneous open
 *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
 *	sES -> sIG	Late retransmitted SYN/ACK?
 *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sIG
 *	sCL -> sIG
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sSS -> sIV	Server might not send FIN in this state.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions.
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
 *	sSS -> sIG	Might be a half-open connection.
 *	sS2 -> sIG
 *	sSR -> sSR	Might answer late resent SYN.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	}
};
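
/* Usage sketch (illustrative, not taken from the original comments): the
 * array is indexed as tcp_conntracks[dir][index][old_state], cf. tcp_packet()
 * and tcp_new() below.  For example, the first SYN seen in the ORIGINAL
 * direction of a fresh entry moves NONE to SYN_SENT:
 *
 *	tcp_conntracks[IP_CT_DIR_ORIGINAL][TCP_SYN_SET][TCP_CONNTRACK_NONE]
 *		== TCP_CONNTRACK_SYN_SENT
 *
 * and the SYN/ACK then seen in the REPLY direction moves SYN_SENT to
 * SYN_RECV.
 */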

static inline struct nf_tcp_net *tcp_pernet(struct net *net)
{
	return &net->ct.nf_ct_proto.tcp;
}

static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
			     struct net *net, struct nf_conntrack_tuple *tuple)
{
	const struct tcphdr *hp;
	struct tcphdr _hdr;

	/* Actually only need first 4 bytes to get ports. */
	hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
	if (hp == NULL)
		return false;

	tuple->src.u.tcp.port = hp->source;
	tuple->dst.u.tcp.port = hp->dest;

	return true;
}

static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
			     const struct nf_conntrack_tuple *orig)
{
	tuple->src.u.tcp.port = orig->dst.u.tcp.port;
	tuple->dst.u.tcp.port = orig->src.u.tcp.port;
	return true;
}

#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif

static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
	if (tcph->rst) return TCP_RST_SET;
	else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
	else if (tcph->fin) return TCP_FIN_SET;
	else if (tcph->ack) return TCP_ACK_SET;
	else return TCP_NONE_SET;
}
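
/* Note (added for clarity, not in the original source): the tests above are
 * ordered by precedence, so a segment carrying both RST and SYN is classified
 * as TCP_RST_SET, and SYN+ACK yields TCP_SYNACK_SET rather than TCP_ACK_SET.
 */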

/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
   in IP Filter' by Guido van Rooij.

   http://www.sane.nl/events/sane2000/papers.html
   http://www.darkart.com/mirrors/www.obfuscation.org/ipf/

   The boundaries and the conditions are changed according to RFC793:
   the packet must intersect the window (i.e. segments may be
   after the right or before the left edge) and thus receivers may ACK
   segments after the right edge of the window.

	td_maxend = max(sack + max(win,1)) seen in reply packets
	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
	td_maxwin += seq + len - sender.td_maxend
			if seq + len > sender.td_maxend
	td_end    = max(seq + len) seen in sent packets

   I.   Upper bound for valid data:	seq <= sender.td_maxend
   II.  Lower bound for valid data:	seq + len >= sender.td_end - receiver.td_maxwin
   III.	Upper bound for valid (s)ack:   sack <= receiver.td_end
   IV.	Lower bound for valid (s)ack:	sack >= receiver.td_end - MAXACKWINDOW

   where sack is the highest right edge of the SACK blocks found in the
   packet, or the ack in the case of a packet without the SACK option.

   The upper bound limit for a valid (s)ack is not ignored -
   we don't have to deal with fragments.
*/
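
/* Worked example with made-up numbers (not from the original text): assume
   sender.td_end = 1000, sender.td_maxend = 2000, receiver.td_end = 500,
   receiver.td_maxwin = 300 and MAXACKWINDOW(sender) = 66000.  A segment with
   seq = 1500, seq + len = 1600 and (s)ack = 450 then satisfies

	I.   1500 <= 2000
	II.  1600 >= 1000 - 300 = 700
	III. 450  <= 500
	IV.  450  >= 500 - 66000

   and is accepted as in-window by tcp_in_window() below. */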

static inline __u32 segment_seq_plus_len(__u32 seq,
					 size_t len,
					 unsigned int dataoff,
					 const struct tcphdr *tcph)
{
	/* XXX Should I use payload length field in IP/IPv6 header ?
	 * - YK */
	return (seq + len - dataoff - tcph->doff*4
		+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
}
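
/* Example (illustrative, not part of the original code): skb->len - dataoff
 * is the TCP header plus payload, so for a bare SYN carrying no data the
 * function returns seq + 1 (the SYN occupies one sequence number), while a
 * pure ACK with 100 bytes of payload yields seq + 100.
 */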

/* Fixme: what about big packets? */
#define MAXACKWINCONST			66000
#define MAXACKWINDOW(sender)						\
	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
					      : MAXACKWINCONST)
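
/* Sketch of the bound (added note, not from the original source):
 * MAXACKWINDOW(sender) is simply max(sender->td_maxwin, 66000), so rule IV
 * above is never tightened below roughly a 64K window.
 */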

/*
 * Simplified tcp_parse_options routine from tcp_input.c
 */
static void tcp_options(const struct sk_buff *skb,
			unsigned int dataoff,
			const struct tcphdr *tcph,
			struct ip_ct_tcp_state *state)
{
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	BUG_ON(ptr == NULL);

	state->td_scale =
	state->flags = 0;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK_PERM
			    && opsize == TCPOLEN_SACK_PERM)
				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
			else if (opcode == TCPOPT_WINDOW
				 && opsize == TCPOLEN_WINDOW) {
				state->td_scale = *(u_int8_t *)ptr;

				if (state->td_scale > TCP_MAX_WSCALE)
					state->td_scale = TCP_MAX_WSCALE;

				state->flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                     const struct tcphdr *tcph, __u32 *sack)
{
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);
	__u32 tmp;

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	BUG_ON(ptr == NULL);

	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED
	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
				       | (TCPOPT_NOP << 16)
				       | (TCPOPT_TIMESTAMP << 8)
				       | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);

					if (after(tmp, *sack))
						*sack = tmp;
				}
				return;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

static bool tcp_in_window(const struct nf_conn *ct,
			  struct ip_ct_tcp *state,
			  enum ip_conntrack_dir dir,
			  unsigned int index,
			  const struct sk_buff *skb,
			  unsigned int dataoff,
			  const struct tcphdr *tcph,
			  u_int8_t pf)
{
	struct net *net = nf_ct_net(ct);
	struct nf_tcp_net *tn = tcp_pernet(net);
	struct ip_ct_tcp_state *sender = &state->seen[dir];
	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	__u32 seq, ack, sack, end, win, swin;
	s32 receiver_offset;
	bool res, in_recv_win;

	/*
	 * Get the required data from the packet.
	 */
	seq = ntohl(tcph->seq);
	ack = sack = ntohl(tcph->ack_seq);
	win = ntohs(tcph->window);
	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);

	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
		tcp_sack(skb, dataoff, tcph, &sack);

	/* Take into account NAT sequence number mangling */
	receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
	ack -= receiver_offset;
	sack -= receiver_offset;

	pr_debug("tcp_in_window: START\n");
	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	if (sender->td_maxwin == 0) {
		/*
		 * Initialize sender data.
		 */
		if (tcph->syn) {
			/*
			 * SYN-ACK in reply to a SYN
			 * or SYN from reply direction in simultaneous open.
			 */
			sender->td_end =
			sender->td_maxend = end;
			sender->td_maxwin = (win == 0 ? 1 : win);

			tcp_options(skb, dataoff, tcph, sender);
			/*
			 * RFC 1323:
			 * Both sides must send the Window Scale option
			 * to enable window scaling in either direction.
			 */
			if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
			      && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
				sender->td_scale =
				receiver->td_scale = 0;
			if (!tcph->ack)
				/* Simultaneous open */
				return true;
		} else {
			/*
			 * We are in the middle of a connection,
			 * its history is lost for us.
			 * Let's try to use the data from the packet.
			 */
			sender->td_end = end;
			swin = win << sender->td_scale;
			sender->td_maxwin = (swin == 0 ? 1 : swin);
			sender->td_maxend = end + sender->td_maxwin;
			/*
			 * We haven't seen traffic in the other direction yet
			 * but we have to tweak window tracking to pass III
			 * and IV until that happens.
			 */
			if (receiver->td_maxwin == 0)
				receiver->td_end = receiver->td_maxend = sack;
		}
	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
		     && dir == IP_CT_DIR_ORIGINAL)
		   || (state->state == TCP_CONNTRACK_SYN_RECV
		     && dir == IP_CT_DIR_REPLY))
		   && after(end, sender->td_end)) {
		/*
		 * RFC 793: "if a TCP is reinitialized ... then it need
		 * not wait at all; it must only be sure to use sequence
		 * numbers larger than those recently used."
		 */
		sender->td_end =
		sender->td_maxend = end;
		sender->td_maxwin = (win == 0 ? 1 : win);

		tcp_options(skb, dataoff, tcph, sender);
	}

	if (!(tcph->ack)) {
		/*
		 * If there is no ACK, just pretend it was set and OK.
		 */
		ack = sack = receiver->td_end;
	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
		    (TCP_FLAG_ACK|TCP_FLAG_RST))
		   && (ack == 0)) {
		/*
		 * Broken TCP stacks that set ACK in RST packets
		 * with a zero ack value.
		 */
		ack = sack = receiver->td_end;
	}

	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
		/*
		 * RST sent answering SYN.
		 */
		seq = end = sender->td_end;

	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	/* Is the ending sequence in the receive window (if available)? */
	in_recv_win = !receiver->td_maxwin ||
		      after(end, sender->td_end - receiver->td_maxwin - 1);

	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
		 before(seq, sender->td_maxend + 1),
		 (in_recv_win ? 1 : 0),
		 before(sack, receiver->td_end + 1),
		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));

	if (before(seq, sender->td_maxend + 1) &&
	    in_recv_win &&
	    before(sack, receiver->td_end + 1) &&
	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
		/*
		 * Take into account window scaling (RFC 1323).
		 */
		if (!tcph->syn)
			win <<= sender->td_scale;

		/*
		 * Update sender data.
		 */
		swin = win + (sack - ack);
		if (sender->td_maxwin < swin)
			sender->td_maxwin = swin;
		if (after(end, sender->td_end)) {
			sender->td_end = end;
			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
		}
		if (tcph->ack) {
			if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
				sender->td_maxack = ack;
				sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
			} else if (after(ack, sender->td_maxack))
				sender->td_maxack = ack;
		}

		/*
		 * Update receiver data.
		 */
		if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
			receiver->td_maxwin += end - sender->td_maxend;
		if (after(sack + win, receiver->td_maxend - 1)) {
			receiver->td_maxend = sack + win;
			if (win == 0)
				receiver->td_maxend++;
		}
		if (ack == receiver->td_end)
			receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

		/*
		 * Check retransmissions.
		 */
		if (index == TCP_ACK_SET) {
			if (state->last_dir == dir
			    && state->last_seq == seq
			    && state->last_ack == ack
			    && state->last_end == end
			    && state->last_win == win)
				state->retrans++;
			else {
				state->last_dir = dir;
				state->last_seq = seq;
				state->last_ack = ack;
				state->last_end = end;
				state->last_win = win;
				state->retrans = 0;
			}
		}
		res = true;
	} else {
		res = false;
		if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
		    tn->tcp_be_liberal)
			res = true;
		if (!res && LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
			"nf_ct_tcp: %s ",
			before(seq, sender->td_maxend + 1) ?
			in_recv_win ?
			before(sack, receiver->td_end + 1) ?
			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
			: "ACK is under the lower bound (possible overly delayed ACK)"
			: "ACK is over the upper bound (ACKed data not seen yet)"
			: "SEQ is under the lower bound (already ACKed data retransmitted)"
			: "SEQ is over the upper bound (over the window of the receiver)");
	}

	pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
		 "receiver end=%u maxend=%u maxwin=%u\n",
		 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);

	return res;
}

/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
				 TCPHDR_URG) + 1] =
{
	[TCPHDR_SYN]				= 1,
	[TCPHDR_SYN|TCPHDR_URG]			= 1,
	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
	[TCPHDR_RST]				= 1,
	[TCPHDR_RST|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
	[TCPHDR_ACK]				= 1,
	[TCPHDR_ACK|TCPHDR_URG]			= 1,
};
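
/* Illustrative examples (not part of the original table): tcp_error() below
 * masks out ECE, CWR and PSH before the lookup, so SYN|PSH|ECE reduces to
 * TCPHDR_SYN and passes, while SYN|FIN or a bare FIN hits an unlisted
 * combination (zero entry) and is rejected as an invalid flag combination.
 */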

/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
static int tcp_error(struct net *net, struct nf_conn *tmpl,
		     struct sk_buff *skb,
		     unsigned int dataoff,
		     u_int8_t pf,
		     unsigned int hooknum)
{
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned int tcplen = skb->len - dataoff;
	u_int8_t tcpflags;

	/* Smaller than the minimal TCP header? */
	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: short packet ");
		return -NF_ACCEPT;
	}

	/* Not whole TCP header or malformed packet */
	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: truncated/malformed packet ");
		return -NF_ACCEPT;
	}

	/* Checksum invalid? Ignore.
	 * We skip checking packets on the outgoing path
	 * because the checksum is assumed to be correct.
	 */
	/* FIXME: Source route IP option packets --RR */
	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
	    nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				  "nf_ct_tcp: bad TCP checksum ");
		return -NF_ACCEPT;
	}

	/* Check TCP flags. */
	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
	if (!tcp_valid_flags[tcpflags]) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				  "nf_ct_tcp: invalid TCP flag combination ");
		return -NF_ACCEPT;
	}

	return NF_ACCEPT;
}

static unsigned int *tcp_get_timeouts(struct net *net)
{
	return tcp_pernet(net)->timeouts;
}

/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
		      const struct sk_buff *skb,
		      unsigned int dataoff,
		      enum ip_conntrack_info ctinfo,
		      u_int8_t pf,
		      unsigned int *timeouts)
{
	struct net *net = nf_ct_net(ct);
	struct nf_tcp_net *tn = tcp_pernet(net);
	struct nf_conntrack_tuple *tuple;
	enum tcp_conntrack new_state, old_state;
	enum ip_conntrack_dir dir;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned long timeout;
	unsigned int index;

	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	BUG_ON(th == NULL);

	spin_lock_bh(&ct->lock);
	old_state = ct->proto.tcp.state;
	dir = CTINFO2DIR(ctinfo);
	index = get_conntrack_index(th);
	new_state = tcp_conntracks[dir][index][old_state];
	tuple = &ct->tuplehash[dir].tuple;

	switch (new_state) {
	case TCP_CONNTRACK_SYN_SENT:
		if (old_state < TCP_CONNTRACK_TIME_WAIT)
			break;
		/* RFC 1122: "When a connection is closed actively,
		 * it MUST linger in TIME-WAIT state for a time 2xMSL
		 * (Maximum Segment Lifetime). However, it MAY accept
		 * a new SYN from the remote TCP to reopen the connection
		 * directly from TIME-WAIT state, if..."
		 * We ignore the conditions because we are in the
		 * TIME-WAIT state anyway.
		 *
		 * Handle aborted connections: we and the server
		 * think there is an existing connection but the client
		 * aborts it and starts a new one.
		 */
		if (((ct->proto.tcp.seen[dir].flags
		      | ct->proto.tcp.seen[!dir].flags)
		     & IP_CT_TCP_FLAG_CLOSE_INIT)
		    || (ct->proto.tcp.last_dir == dir
		        && ct->proto.tcp.last_index == TCP_RST_SET)) {
			/* Attempt to reopen a closed/aborted connection.
			 * Delete this connection and look up again. */
			spin_unlock_bh(&ct->lock);

			/* Only repeat if we can actually remove the timer.
			 * Destruction may already be in progress in process
			 * context and we must give it a chance to terminate.
			 */
			if (nf_ct_kill(ct))
				return -NF_REPEAT;
			return NF_DROP;
		}
		/* Fall through */
	case TCP_CONNTRACK_IGNORE:
		/* Ignored packets:
		 *
		 * Our connection entry may be out of sync, so ignore
		 * packets which may signal the real connection between
		 * the client and the server.
		 *
		 * a) SYN in ORIGINAL
		 * b) SYN/ACK in REPLY
		 * c) ACK in reply direction after initial SYN in original.
		 *
		 * If the ignored packet is invalid, the receiver will send
		 * a RST we'll catch below.
		 */
		if (index == TCP_SYNACK_SET
		    && ct->proto.tcp.last_index == TCP_SYN_SET
		    && ct->proto.tcp.last_dir != dir
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* b) This SYN/ACK acknowledges a SYN that we earlier
			 * ignored as invalid. This means that the client and
			 * the server are both in sync, while the firewall is
			 * not. We get in sync from the previously annotated
			 * values.
			 */
			old_state = TCP_CONNTRACK_SYN_SENT;
			new_state = TCP_CONNTRACK_SYN_RECV;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
				ct->proto.tcp.last_win == 0 ?
					1 : ct->proto.tcp.last_win;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
				ct->proto.tcp.last_wscale;
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
				ct->proto.tcp.last_flags;
			memset(&ct->proto.tcp.seen[dir], 0,
			       sizeof(struct ip_ct_tcp_state));
			break;
		}
		ct->proto.tcp.last_index = index;
		ct->proto.tcp.last_dir = dir;
		ct->proto.tcp.last_seq = ntohl(th->seq);
		ct->proto.tcp.last_end =
		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
		ct->proto.tcp.last_win = ntohs(th->window);

		/* a) This is a SYN in ORIGINAL. The client and the server
		 * may be in sync but we are not. In that case, we annotate
		 * the TCP options and let the packet go through. If it is a
		 * valid SYN packet, the server will reply with a SYN/ACK, and
		 * then we'll get in sync. Otherwise, the server potentially
		 * responds with a challenge ACK if implementing RFC5961.
		 */
		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
			struct ip_ct_tcp_state seen = {};

			ct->proto.tcp.last_flags =
			ct->proto.tcp.last_wscale = 0;
			tcp_options(skb, dataoff, th, &seen);
			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
				ct->proto.tcp.last_wscale = seen.td_scale;
			}
			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_SACK_PERM;
			}
			/* Mark the potential for an RFC5961 challenge ACK;
			 * this poses a special problem for the LAST_ACK state,
			 * as the ACK is then interpreted as ACKing the last FIN.
			 */
			if (old_state == TCP_CONNTRACK_LAST_ACK)
				ct->proto.tcp.last_flags |=
					IP_CT_EXP_CHALLENGE_ACK;
		}
		spin_unlock_bh(&ct->lock);
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				  "nf_ct_tcp: invalid packet ignored in "
				  "state %s ", tcp_conntrack_names[old_state]);
		return NF_ACCEPT;
	case TCP_CONNTRACK_MAX:
		/* Special case for SYN proxy: when the SYN to the server or
		 * the SYN/ACK from the server is lost, the client may transmit
		 * a keep-alive packet while in SYN_SENT state. This needs to
		 * be associated with the original conntrack entry in order to
		 * generate a new SYN with the correct sequence number.
		 */
		if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
		    index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
			pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
			spin_unlock_bh(&ct->lock);
			return NF_ACCEPT;
		}

		/* Invalid packet */
		pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
			 dir, get_conntrack_index(th), old_state);
		spin_unlock_bh(&ct->lock);
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				  "nf_ct_tcp: invalid state ");
		return -NF_ACCEPT;
	case TCP_CONNTRACK_TIME_WAIT:
		/* RFC5961 compliance causes stacks to send a "challenge ACK",
		 * e.g. in response to spurious SYNs.  Conntrack MUST
		 * not believe this ACK is acking the last FIN.
		 */
		if (old_state == TCP_CONNTRACK_LAST_ACK &&
		    index == TCP_ACK_SET &&
		    ct->proto.tcp.last_dir != dir &&
		    ct->proto.tcp.last_index == TCP_SYN_SET &&
		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
			/* Detected RFC5961 challenge ACK */
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			spin_unlock_bh(&ct->lock);
			if (LOG_INVALID(net, IPPROTO_TCP))
				nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_tcp: challenge-ACK ignored ");
			return NF_ACCEPT; /* Don't change state */
		}
		break;
	case TCP_CONNTRACK_CLOSE:
		if (index == TCP_RST_SET
		    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
		    && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
			/* Invalid RST */
			spin_unlock_bh(&ct->lock);
			if (LOG_INVALID(net, IPPROTO_TCP))
				nf_log_packet(net, pf, 0, skb, NULL, NULL,
					      NULL, "nf_ct_tcp: invalid RST ");
			return -NF_ACCEPT;
		}
		if (index == TCP_RST_SET
		    && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
			 && ct->proto.tcp.last_index == TCP_SYN_SET)
			|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
			    && ct->proto.tcp.last_index == TCP_ACK_SET))
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* RST sent to invalid SYN or ACK we had let through
			 * at a) and c) above:
			 *
			 * a) SYN was in window then
			 * c) we hold a half-open connection.
			 *
			 * Delete our connection entry.
			 * We skip window checking, because the packet might
			 * ACK segments we ignored. */
			goto in_window;
		}
		/* Just fall through */
	default:
		/* Keep compilers happy. */
		break;
	}

	if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
			   skb, dataoff, th, pf)) {
		spin_unlock_bh(&ct->lock);
		return -NF_ACCEPT;
	}
     in_window:
	/* From now on we have got in-window packets */
	ct->proto.tcp.last_index = index;
	ct->proto.tcp.last_dir = dir;

	pr_debug("tcp_conntracks: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
		 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
		 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
		 old_state, new_state);

	ct->proto.tcp.state = new_state;
	if (old_state != new_state
	    && new_state == TCP_CONNTRACK_FIN_WAIT)
		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;

	if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
	    timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
		timeout = timeouts[TCP_CONNTRACK_RETRANS];
	else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
		 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
		 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
		timeout = timeouts[TCP_CONNTRACK_UNACK];
	else
		timeout = timeouts[new_state];
	spin_unlock_bh(&ct->lock);

	if (new_state != old_state)
		nf_conntrack_event_cache(IPCT_PROTOINFO, ct);

	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* If the only reply is a RST, we can consider ourselves not
		   to have an established connection: this is a fairly common
		   problem case, so we can delete the conntrack
		   immediately.  --RR */
		if (th->rst) {
			nf_ct_kill_acct(ct, ctinfo, skb);
			return NF_ACCEPT;
		}
		/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
		 * pickup with loose=1. Avoid large ESTABLISHED timeout.
		 */
		if (new_state == TCP_CONNTRACK_ESTABLISHED &&
		    timeout > timeouts[TCP_CONNTRACK_UNACK])
			timeout = timeouts[TCP_CONNTRACK_UNACK];
	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
		   && (old_state == TCP_CONNTRACK_SYN_RECV
		       || old_state == TCP_CONNTRACK_ESTABLISHED)
		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
		/* Set ASSURED if we see a valid ack in ESTABLISHED
		   after SYN_RECV or a valid answer for a picked up
		   connection. */
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	}
	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);

	return NF_ACCEPT;
}

/* Called when a new connection for this protocol is found. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
		    unsigned int dataoff, unsigned int *timeouts)
{
	enum tcp_conntrack new_state;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	struct net *net = nf_ct_net(ct);
	struct nf_tcp_net *tn = tcp_pernet(net);
	const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
	const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];

	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	BUG_ON(th == NULL);

	/* Don't need lock here: this conntrack not in circulation yet */
	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];

	/* Invalid: delete conntrack */
	if (new_state >= TCP_CONNTRACK_MAX) {
		pr_debug("nf_ct_tcp: invalid new deleting.\n");
		return false;
	}

	if (new_state == TCP_CONNTRACK_SYN_SENT) {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/* SYN packet */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end;

		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
	} else if (tn->tcp_loose == 0) {
		/* Don't try to pick up connections. */
		return false;
	} else {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/*
		 * We are in the middle of a connection,
		 * its history is lost for us.
		 * Let's try to use the data from the packet.
		 */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end +
			ct->proto.tcp.seen[0].td_maxwin;

		/* We assume SACK and liberal window checking to handle
		 * window scaling */
		ct->proto.tcp.seen[0].flags =
		ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
					      IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	/* tcp_packet will set them */
	ct->proto.tcp.last_index = TCP_NONE_SET;

	pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);
	return true;
}

static bool tcp_can_early_drop(const struct nf_conn *ct)
{
	switch (ct->proto.tcp.state) {
	case TCP_CONNTRACK_FIN_WAIT:
	case TCP_CONNTRACK_LAST_ACK:
	case TCP_CONNTRACK_TIME_WAIT:
	case TCP_CONNTRACK_CLOSE:
	case TCP_CONNTRACK_CLOSE_WAIT:
		return true;
	default:
		break;
	}

	return false;
}

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
			 struct nf_conn *ct)
{
	struct nlattr *nest_parms;
	struct nf_ct_tcp_flags tmp = {};

	spin_lock_bh(&ct->lock);
	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
		       ct->proto.tcp.seen[0].td_scale) ||
	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
		       ct->proto.tcp.seen[1].td_scale))
		goto nla_put_failure;

	tmp.flags = ct->proto.tcp.seen[0].flags;
	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
		    sizeof(struct nf_ct_tcp_flags), &tmp))
		goto nla_put_failure;

	tmp.flags = ct->proto.tcp.seen[1].flags;
	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
		    sizeof(struct nf_ct_tcp_flags), &tmp))
		goto nla_put_failure;
	spin_unlock_bh(&ct->lock);

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
};

static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
	int err;

	/* an update may not contain anything about the private
	 * protocol info; in that case skip the parsing */
	if (!pattr)
		return 0;

	err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr,
			       tcp_nla_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CTA_PROTOINFO_TCP_STATE] &&
	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
		return -EINVAL;

	spin_lock_bh(&ct->lock);
	if (tb[CTA_PROTOINFO_TCP_STATE])
		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);

	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
		ct->proto.tcp.seen[0].flags &= ~attr->mask;
		ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
		ct->proto.tcp.seen[1].flags &= ~attr->mask;
		ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
	    ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
	    ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
		ct->proto.tcp.seen[0].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
		ct->proto.tcp.seen[1].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
	}
	spin_unlock_bh(&ct->lock);

	return 0;
}

static int tcp_nlattr_size(void)
{
	return nla_total_size(0)	   /* CTA_PROTOINFO_TCP */
		+ nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
}

static int tcp_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
				     struct net *net, void *data)
{
	unsigned int *timeouts = data;
	struct nf_tcp_net *tn = tcp_pernet(net);
	int i;

	/* set default TCP timeouts. */
	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
		timeouts[i] = tn->timeouts[i];

	if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
		timeouts[TCP_CONNTRACK_SYN_SENT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
		timeouts[TCP_CONNTRACK_SYN_RECV] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
		timeouts[TCP_CONNTRACK_ESTABLISHED] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
		timeouts[TCP_CONNTRACK_FIN_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
		timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
		timeouts[TCP_CONNTRACK_LAST_ACK] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
		timeouts[TCP_CONNTRACK_TIME_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
		timeouts[TCP_CONNTRACK_CLOSE] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
		timeouts[TCP_CONNTRACK_SYN_SENT2] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
		timeouts[TCP_CONNTRACK_RETRANS] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_UNACK]) {
		timeouts[TCP_CONNTRACK_UNACK] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
	}
	return 0;
}

static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeouts = data;

	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -ENOSPC;
}

static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
	[CTA_TIMEOUT_TCP_SYN_SENT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_RECV]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_ESTABLISHED]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_FIN_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_LAST_ACK]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_TIME_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE]		= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_SENT2]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_RETRANS]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_UNACK]		= { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */

#ifdef CONFIG_SYSCTL
static struct ctl_table tcp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_established",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_last_ack",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_time_wait",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_unacknowledged",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_loose",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname       = "nf_conntrack_tcp_be_liberal",
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_tcp_max_retrans",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
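
/* Note (added for clarity, not in the original source): the entries above are
 * positional -- tcp_kmemdup_sysctl_table() below assigns the .data pointers
 * by array index, so the order here must stay in sync with those assignments.
 */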
#endif /* CONFIG_SYSCTL */

static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
				    struct nf_tcp_net *tn)
{
#ifdef CONFIG_SYSCTL
	if (pn->ctl_table)
		return 0;

	pn->ctl_table = kmemdup(tcp_sysctl_table,
				sizeof(tcp_sysctl_table),
				GFP_KERNEL);
	if (!pn->ctl_table)
		return -ENOMEM;

	pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
	pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
	pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
	pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
	pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
	pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
	pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
	pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
	pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
	pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
	pn->ctl_table[10].data = &tn->tcp_loose;
	pn->ctl_table[11].data = &tn->tcp_be_liberal;
	pn->ctl_table[12].data = &tn->tcp_max_retrans;
#endif
	return 0;
}

static int tcp_init_net(struct net *net, u_int16_t proto)
{
	struct nf_tcp_net *tn = tcp_pernet(net);
	struct nf_proto_net *pn = &tn->pn;

	if (!pn->users) {
		int i;

		for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
			tn->timeouts[i] = tcp_timeouts[i];

		tn->tcp_loose = nf_ct_tcp_loose;
		tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
		tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
	}

	return tcp_kmemdup_sysctl_table(pn, tn);
}

static struct nf_proto_net *tcp_get_net_proto(struct net *net)
{
	return &net->ct.nf_ct_proto.tcp.pn;
}

struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
{
	.l3proto		= PF_INET,
	.l4proto		= IPPROTO_TCP,
	.pkt_to_tuple		= tcp_pkt_to_tuple,
	.invert_tuple		= tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	.print_conntrack	= tcp_print_conntrack,
#endif
	.packet			= tcp_packet,
	.get_timeouts		= tcp_get_timeouts,
	.new			= tcp_new,
	.error			= tcp_error,
	.can_early_drop		= tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	.ctnl_timeout		= {
		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
		.obj_size	= sizeof(unsigned int) *
					TCP_CONNTRACK_TIMEOUT_MAX,
		.nla_policy	= tcp_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
	.init_net		= tcp_init_net,
	.get_net_proto		= tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);

struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
{
	.l3proto		= PF_INET6,
	.l4proto		= IPPROTO_TCP,
	.pkt_to_tuple		= tcp_pkt_to_tuple,
	.invert_tuple		= tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	.print_conntrack	= tcp_print_conntrack,
#endif
	.packet			= tcp_packet,
	.get_timeouts		= tcp_get_timeouts,
	.new			= tcp_new,
	.error			= tcp_error,
	.can_early_drop		= tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	.ctnl_timeout		= {
		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
		.obj_size	= sizeof(unsigned int) *
					TCP_CONNTRACK_TIMEOUT_MAX,
		.nla_policy	= tcp_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
	.init_net		= tcp_init_net,
	.get_net_proto		= tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);