// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

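/* Segment acceptability check in the spirit of RFC 793: true if the segment
 * starts exactly at the left window edge, if [seq, end_seq) overlaps the
 * receive window [s_win, e_win), or if it is a zero-length segment sitting
 * exactly at the right window edge.
 */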
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, the tail of its data) because
 *   one or more of our ACKs were lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it is
 *   set to catch "old duplicate segments" wandering off their path.
 *   That is not quite correct. The timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill the TIME-WAIT state too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, we would have to
 * spinlock it. I do not want to! The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
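/* The return value tells the caller how to dispose of the segment:
 * TCP_TW_SUCCESS - consumed, nothing more to do,
 * TCP_TW_ACK     - answer with an ACK,
 * TCP_TW_RST     - answer with a reset,
 * TCP_TW_SYN     - acceptable new SYN; hand it to a listener to reopen the
 *                  connection (tcp_tw_isn carries the suggested ISN).
 */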
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         * Now real TIME-WAIT state.
         *
         * RFC 1122:
         * "When a connection is [...] on TIME-WAIT state [...]
         * [a TCP] MAY accept a new SYN from the remote TCP to
         * reopen the connection directly, if it:
         *
         * (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         * (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In-window segment, it may only be a reset or a bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.
         *
         * All such segments are ACKed immediately.
         *
         * The only exception is a new SYN. We accept it, if it is
         * not an old duplicate and we are in no danger of being killed
         * by delayed old duplicates. The RFC check - that it carries a
         * newer sequence number - works at rates < 40Mbit/sec.
         * However, if PAWS works, it is reliable, and furthermore
         * we may even relax the silly seq space cutoff.
         *
         * RED-PEN: we violate the main RFC requirement: if this SYN
         * turns out to be an old duplicate (i.e. we receive an RST in
         * reply to our SYN-ACK), we must return the socket to the
         * time-wait state. That is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
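                /* RFC 1122 asks that the new ISN exceed anything used on the
                 * previous incarnation; adding 65535 + 2 to tw_snd_nxt gives
                 * a full (unscaled) window of margin (the "+ 2" presumably
                 * accounts for the SYN and FIN sequence numbers).
                 */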
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN, it may be both an old duplicate
                 * and a new good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;

        /*
         * The timewait bucket does not have the key DB from the
         * sock structure. We just make a quick copy of the
         * md5 key being used (if indeed we are using one)
         * so the timewait ack generating code has the key.
         */
        tcptw->tw_md5_key = NULL;
        if (!static_branch_unlikely(&tcp_md5_needed.key))
                return;

        key = tp->af_specific->md5_lookup(sk, sk);
        if (key) {
                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                if (!tcptw->tw_md5_key)
                        return;
                if (!tcp_alloc_md5sig_pool())
                        goto out_free;
                if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
                        goto out_free;
        }
        return;
out_free:
        WARN_ON_ONCE(1);
        kfree(tcptw->tw_md5_key);
        tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_timewait_sock *tw;

        tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
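                /* (rto << 2) - (rto >> 1) == 3.5 * RTO; used below as a
                 * lower bound on the timewait timeout.
                 */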
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

                tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
                tw->tw_mark = sk->sk_mark;
                tw->tw_priority = sk->sk_priority;
                tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt = tp->rcv_nxt;
                tcptw->tw_snd_nxt = tp->snd_nxt;
                tcptw->tw_rcv_wnd = tcp_receive_window(tp);
                tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay = tp->tcp_tx_delay;
                tw->tw_txhash = sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

                tcp_time_wait_init(sk, tcptw);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BH are disabled
                 * in following section, otherwise timer handler could run before
                 * we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up. We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed.key)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

                if (twsk->tw_md5_key) {
                        kfree_rcu(twsk->tw_md5_key, rcu);
                        static_branch_slow_dec_deferred(&tcp_md5_needed);
                }
        }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
        bool purged_once = false;
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list) {
                if (net->ipv4.tcp_death_row.hashinfo->pernet) {
                        /* Even if tw_refcount == 1, we must clean up kernel reqsk */
                        inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
                } else if (!purged_once) {
                        inet_twsk_purge(&tcp_hashinfo);
                        purged_once = true;
                }
        }
}

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                                  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                                  &req->rsk_rcv_wnd,
                                  &req->rsk_window_clamp,
                                  ireq->wscale_ok,
                                  &rcv_wscale,
                                  rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}


void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        const struct tcp_sock *oldtp;
        struct tcp_sock *newtp;
        u32 seq;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        seq = treq->rcv_isn + 1;
        newtp->rcv_wup = seq;
        WRITE_ONCE(newtp->copied_seq, seq);
        WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;

        seq = treq->snt_isn + 1;
        newtp->snd_sml = newtp->snd_una = seq;
        WRITE_ONCE(newtp->snd_nxt, seq);
        newtp->snd_up = seq;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = READ_ONCE(treq->txhash);
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
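                /* Without window scaling, the 16-bit window field caps the
                 * advertised window at 65535.
                 */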
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        if (req->num_timeout) {
                newtp->undo_marker = treq->snt_isn;
                newtp->retrans_stamp = div_u64(treq->snt_synack,
                                               USEC_PER_SEC / TCP_TS_HZ);
                newtp->total_rto = req->num_timeout;
                newtp->total_rto_recoveries = 1;
                newtp->total_rto_time = tcp_time_stamp_raw() -
                                        newtp->retrans_stamp;
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

        newtp->bpf_chg_cc_inprogress = 0;
        tcp_bpf_clone(sk, newsk);

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 * Otherwise, this is from BH context.
 */

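/* Returns NULL if the segment was consumed or must be dropped, @sk itself if
 * the caller should keep processing the segment against that socket (e.g. an
 * unacceptable ACK, or the TFO child), or the newly created child socket once
 * the handshake completes.
 */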
tcp_check_req(struct sock * sk,struct sk_buff * skb,struct request_sock * req,bool fastopen,bool * req_stolen)6021da177e4SLinus Torvalds struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
60360236fddSArnaldo Carvalho de Melo struct request_sock *req,
604e0f9759fSEric Dumazet bool fastopen, bool *req_stolen)
6051da177e4SLinus Torvalds {
6064957faadSWilliam Allen Simpson struct tcp_options_received tmp_opt;
6074957faadSWilliam Allen Simpson struct sock *child;
608aa8223c7SArnaldo Carvalho de Melo const struct tcphdr *th = tcp_hdr(skb);
609714e85beSAl Viro __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
610a2a385d6SEric Dumazet bool paws_reject = false;
6115e0724d0SEric Dumazet bool own_req;
6121da177e4SLinus Torvalds
613bb5b7c11SDavid S. Miller tmp_opt.saw_tstamp = 0;
614bb5b7c11SDavid S. Miller if (th->doff > (sizeof(struct tcphdr)>>2)) {
615eed29f17SEric Dumazet tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
6161da177e4SLinus Torvalds
6171da177e4SLinus Torvalds if (tmp_opt.saw_tstamp) {
618eba20811SEric Dumazet tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
61995a22caeSFlorian Westphal if (tmp_opt.rcv_tsecr)
62095a22caeSFlorian Westphal tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
6211da177e4SLinus Torvalds /* We do not store true stamp, but it is not required,
6221da177e4SLinus Torvalds * it can be estimated (approximately)
6231da177e4SLinus Torvalds * from another data.
6241da177e4SLinus Torvalds */
6255903123fSAkhmat Karakotov tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
626c887e6d2SIlpo Järvinen paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
6271da177e4SLinus Torvalds }
6281da177e4SLinus Torvalds }
6291da177e4SLinus Torvalds
6301da177e4SLinus Torvalds /* Check for pure retransmitted SYN. */
6312e6599cbSArnaldo Carvalho de Melo if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
6321da177e4SLinus Torvalds flg == TCP_FLAG_SYN &&
6331da177e4SLinus Torvalds !paws_reject) {
6341da177e4SLinus Torvalds /*
6351da177e4SLinus Torvalds * RFC793 draws (Incorrectly! It was fixed in RFC1122)
6361da177e4SLinus Torvalds * this case on figure 6 and figure 8, but formal
6371da177e4SLinus Torvalds * protocol description says NOTHING.
6381da177e4SLinus Torvalds * To be more exact, it says that we should send ACK,
6391da177e4SLinus Torvalds * because this segment (at least, if it has no data)
6401da177e4SLinus Torvalds * is out of window.
6411da177e4SLinus Torvalds *
6421da177e4SLinus Torvalds * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
6431da177e4SLinus Torvalds * describe SYN-RECV state. All the description
6441da177e4SLinus Torvalds * is wrong, we cannot believe to it and should
6451da177e4SLinus Torvalds * rely only on common sense and implementation
6461da177e4SLinus Torvalds * experience.
6471da177e4SLinus Torvalds *
6481da177e4SLinus Torvalds * Enforce "SYN-ACK" according to figure 8, figure 6
6491da177e4SLinus Torvalds * of RFC793, fixed by RFC1122.
6508336886fSJerry Chu *
6518336886fSJerry Chu * Note that even if there is new data in the SYN packet
6528336886fSJerry Chu * they will be thrown away too.
653cd75eff6SYuchung Cheng *
654cd75eff6SYuchung Cheng * Reset timer after retransmitting SYNACK, similar to
655cd75eff6SYuchung Cheng * the idea of fast retransmit in recovery.
6561da177e4SLinus Torvalds */
657a9b2c06dSNeal Cardwell if (!tcp_oow_rate_limited(sock_net(sk), skb,
658a9b2c06dSNeal Cardwell LINUX_MIB_TCPACKSKIPPEDSYNRECV,
659a9b2c06dSNeal Cardwell &tcp_rsk(req)->last_oow_ack_time) &&
660a9b2c06dSNeal Cardwell
661dd929c1bSEric Dumazet !inet_rtx_syn_ack(sk, req)) {
662dd929c1bSEric Dumazet unsigned long expires = jiffies;
663dd929c1bSEric Dumazet
6645903123fSAkhmat Karakotov expires += reqsk_timeout(req, TCP_RTO_MAX);
665dd929c1bSEric Dumazet if (!fastopen)
666dd929c1bSEric Dumazet mod_timer_pending(&req->rsk_timer, expires);
667dd929c1bSEric Dumazet else
668dd929c1bSEric Dumazet req->rsk_timer.expires = expires;
669dd929c1bSEric Dumazet }
6701da177e4SLinus Torvalds return NULL;
6711da177e4SLinus Torvalds }
        /* Further reproduces the section "SEGMENT ARRIVES"
         * for state SYN-RECEIVED of RFC793.
         * It is broken, however: it fails only when SYNs are crossed.
         *
         * You would think that SYN crossing is impossible here, since
         * we should have a SYN_SENT socket (from connect()) on our end,
         * but this is not true if the crossed SYNs were sent to both
         * ends by a malicious third party. We must defend against this,
         * and to do that we first verify the ACK (as per RFC793, page
         * 36) and reset if it is invalid. Is this a true full defense?
         * To convince ourselves, let us consider a way in which the ACK
         * test can still pass in this 'malicious crossed SYNs' case.
         * A malicious sender sends identical SYNs (and thus identical
         * sequence numbers) to both A and B:
         *
         * A: gets SYN, seq=7
         * B: gets SYN, seq=7
         *
         * By our good fortune, both A and B select the same initial
         * send sequence number of seven :-)
         *
         * A: sends SYN|ACK, seq=7, ack_seq=8
         * B: sends SYN|ACK, seq=7, ack_seq=8
         *
         * So we are now A eating this SYN|ACK, and the ACK test passes. So
         * does the sequence test, the SYN is truncated, and thus we consider
         * it a bare ACK.
         *
         * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
         * bare ACK. Otherwise, we create an established connection. Both
         * ends (listening sockets) accept the new incoming connection and try
         * to talk to each other. 8-)
         *
         * Note: This case is both harmless and rare. The possibility is about
         * the same as us discovering intelligent life on another planet
         * tomorrow.
         *
         * But generally, we should (the RFC lies!) accept an ACK
         * from a SYNACK both here and in tcp_rcv_state_process().
         * tcp_rcv_state_process() does not, hence we do not either.
         *
         * Note that the case is absolutely generic:
         * we cannot optimize anything here without
         * violating the protocol. All the checks must be made
         * before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too early or too late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        /* TODO: We probably should defer ts_recent change once
         * we take ownership of @req.
         */
        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                 * at tcp_rsk(req)->rcv_isn + 1.
                 */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set. If ACK not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, the ACK is valid, create the big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
         * ESTABLISHED STATE. If it is dropped after the
         * socket is created, expect trouble.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (!child)
                goto listen_overflow;

        if (own_req && rsk_drop_req(req)) {
                reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
                inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
                return child;
        }

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        if (sk != req->rsk_listener)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary to
                 * avoid becoming vulnerable to an outside attack aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk, skb);
        }
        if (!fastopen) {
                bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

                if (unlinked)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
                *req_stolen = !unlinked;
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
        __releases(&((child)->sk_lock.slock))
{
        int ret = 0;
        int state = child->sk_state;

        /* record sk_napi_id and sk_rx_queue_mapping of child. */
        sk_mark_napi_id_set(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the listening
                 * socket does not protect us any more.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);