/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

/* Return true if [seq, end_seq] overlaps the receive window
 * [s_win, e_win), using wrap-safe sequence arithmetic. A zero-length
 * segment is acceptable when it sits exactly at a window edge.
 */
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}
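
/* A minimal userspace sketch (not built here) of the wrap-safe sequence
 * comparisons that after()/before() and tcp_in_window() rely on: signed
 * 32-bit subtraction keeps the checks correct across sequence-number
 * wraparound. The helper names mirror the kernel's but are redefined
 * locally for illustration.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;    /* true if a precedes b modulo 2^32 */
}

static bool seq_after(uint32_t a, uint32_t b)
{
        return seq_before(b, a);
}

int main(void)
{
        assert(seq_before(10, 20));
        assert(seq_before(0xffffff00u, 100));   /* wraps across zero */
        assert(seq_after(100, 0xffffff00u));
        return 0;
}
#endif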

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note that we do not put the bucket;
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
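
/* Illustrative sketch (not built here) of the idea behind
 * tcp_oow_rate_limited(): answer at most one out-of-window ACK per
 * rate-limit interval, by stamping the time of the last ACK we sent.
 * The names and the 500 ms interval are assumptions for the example,
 * not the kernel's actual tunables.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define OOW_RATELIMIT_MS 500    /* assumed interval for the sketch */

/* Return true if the caller must suppress (rate-limit) this ACK. */
static bool oow_rate_limited(uint64_t now_ms, uint64_t *last_ack_ms)
{
        if (*last_ack_ms && now_ms - *last_ack_ms < OOW_RATELIMIT_MS)
                return true;    /* too soon since the previous ACK */
        *last_ack_ms = now_ms;  /* allowed: remember when we answered */
        return false;
}
#endif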

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow for the loss of one (or more) segments sent by the peer and of
 *   our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates (e.g. based
 *   on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is
 * _not_ stateless. It means that, strictly speaking, we must spinlock
 * it. I do not want to! The probability of misbehaviour is ridiculously
 * low and, it seems, we could use some mb() tricks to avoid misreading
 * sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrives after a half-duplex
                 * close, reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         * Now real TIME-WAIT state.
         *
         * RFC 1122:
         * "When a connection is [...] on TIME-WAIT state [...]
         * [a TCP] MAY accept a new SYN from the remote TCP to
         * reopen the connection directly, if it:
         *
         * (1)  assigns its initial sequence number for the new
         * connection to be larger than the largest sequence
         * number it used on the previous connection incarnation,
         * and
         *
         * (2)  returns to TIME-WAIT state if the SYN turns out
         * to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.
         *
         * All the segments are ACKed immediately.
         *
         * The only exception is a new SYN. We accept it, if it is not an
         * old duplicate and we are not in danger of being killed by
         * delayed old duplicates. The RFC check, that it carries a newer
         * sequence number, works at rates < 40 Mbit/sec. However, if PAWS
         * works, it is reliable, and we may even relax the silly sequence
         * space cutoff.
         *
         * RED-PEN: we violate the main RFC requirement: if this SYN turns
         * out to be an old duplicate (i.e. we receive RST in reply to our
         * SYN-ACK), we must return the socket to time-wait state. It is
         * not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN, it may be both an old duplicate
                 * and a new good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the last case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
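
/* For context, a condensed sketch of how a caller reacts to the four
 * tcp_tw_status values, modelled on the do_time_wait handling in
 * tcp_v4_rcv() (net/ipv4/tcp_ipv4.c); kept under #if 0 because the real
 * switch carries extra listener-lookup and refcounting details:
 */
#if 0
        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
                /* Acceptable new SYN: the real code looks up a listening
                 * socket and lets the segment open a fresh connection.
                 */
                break;
        case TCP_TW_ACK:
                /* Answer with an ACK; the callee kept the tw reference. */
                tcp_v4_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                /* Send a reset and kill the time-wait socket. */
                tcp_v4_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                break;
        case TCP_TW_SUCCESS:
                /* Nothing to transmit; the skb is simply dropped. */
                break;
        }
#endif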

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        tw = inet_twsk_alloc(sk, tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  /* 3.5 * RTO */
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent = inet->transparent;
                tw->tw_mark = sk->sk_mark;
                tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt = tp->rcv_nxt;
                tcptw->tw_snd_nxt = tp->snd_nxt;
                tcptw->tw_rcv_wnd = tcp_receive_window(tp);
                tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ack generating code has the key.
                 */
                do {
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
                        if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
                        }
                } while (0);
#endif

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BHs are
                 * disabled in the following section, otherwise the timer
                 * handler could run before we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up. We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
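
/* Aside: the rto expression above is a shift-only way of computing
 * 3.5 * icsk_rto: (x << 2) is 4x and (x >> 1) is x/2, so the difference
 * is 3.5x. A standalone sketch (not built here) makes that concrete:
 */
#if 0
#include <assert.h>

int main(void)
{
        int rto = 200;                          /* e.g. 200 ms */
        int timeo = (rto << 2) - (rto >> 1);    /* 4*rto - rto/2 */

        assert(timeo == 700);                   /* 3.5 * 200 */
        return 0;
}
#endif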

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_timewait_sock *twsk = tcp_twsk(sk);

        if (twsk->tw_md5_key)
                kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                &req->rsk_rcv_wnd,
                &req->rsk_window_clamp,
                ireq->wscale_ok,
                &rcv_wscale,
                rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
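
/* A simplified sketch (not built here) of the scale-selection step a
 * helper like tcp_select_initial_window() must perform: the TCP window
 * field is only 16 bits, so pick the smallest wscale (capped at 14, per
 * RFC 7323) that lets the desired buffer size be advertised.
 */
#if 0
#include <stdint.h>

static uint8_t pick_rcv_wscale(uint32_t space)
{
        uint8_t wscale = 0;

        while (space > 65535 && wscale < 14) {  /* 65535 = max raw window */
                space >>= 1;
                wscale++;
        }
        return wscale;
}
#endif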

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && try_module_get(ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
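
/* The per-route choice above (the RTAX_CC_ALGO metric, typically installed
 * with iproute2's "congctl" route option) interacts with the per-socket one
 * (icsk_ca_setsockopt). From userspace, the per-socket knob is the standard
 * TCP_CONGESTION socket option; a minimal sketch (not built here):
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
        char name[16] = "cubic";        /* any module the kernel has loaded */
        socklen_t len = sizeof(name);
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)))
                perror("setsockopt");
        if (!getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len))
                printf("using %s\n", name);
        return 0;
}
#endif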

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        newtp->rcv_wup = newtp->copied_seq =
        newtp->rcv_nxt = treq->rcv_isn + 1;
        newtp->segs_in = 1;

        newtp->snd_sml = newtp->snd_una =
        newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = treq->txhash;
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
        if (newtp->af_specific->md5_lookup(sk, newsk))
                newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        newtp->fastopen_rsk = NULL;

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
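
/* Aside on the snd_wnd assignment above: the header's window field is
 * 16 bits and is shifted by the scale the peer advertised in its SYN.
 * A standalone sketch (not built here) of that recovery step:
 */
#if 0
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t raw = htons(512);      /* window field as it sits on the wire */
        uint8_t snd_wscale = 7;         /* scale the peer announced (RFC 7323) */
        uint32_t snd_wnd = (uint32_t)ntohs(raw) << snd_wscale;

        assert(snd_wnd == 512u << 7);   /* 65536 bytes of usable send window */
        return 0;
}
#endif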

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store the true stamp, but it is not
                         * required; it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING. To be more exact,
                 * it says that we should send an ACK, because this
                 * segment (at least, if it has no data) is out of window.
                 *
                 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 * describe SYN-RECV state. All the description is wrong;
                 * we cannot believe it and should rely only on common
                 * sense and implementation experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK,
                 * similar to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
                                       TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
                return NULL;
        }

        /* Further reproduces the section "SEGMENT ARRIVES" for state
         * SYN-RECEIVED of RFC793. It is broken, however: the only case
         * in which it does not work is when SYNs are crossed.
         *
         * You would think that SYN crossing is impossible here, since
         * we should have a SYN_SENT socket (from connect()) on our end,
         * but this is not true if the crossed SYNs were sent to both
         * ends by a malicious third party. We must defend against this,
         * and to do that we first verify the ACK (as per RFC793, page
         * 36) and reset if it is invalid. Is this a true full defense?
         * To convince ourselves, let us consider a way in which the ACK
         * test can still pass in this 'malicious crossed SYNs' case.
         * The malicious sender sends identical SYNs (and thus identical
         * sequence numbers) to both A and B:
         *
         *      A: gets SYN, seq=7
         *      B: gets SYN, seq=7
         *
         * By our good fortune, both A and B select the same initial
         * send sequence number of seven :-)
         *
         *      A: sends SYN|ACK, seq=7, ack_seq=8
         *      B: sends SYN|ACK, seq=7, ack_seq=8
         *
         * So we are now A eating this SYN|ACK, the ACK test passes. So
         * does the sequence test, the SYN is truncated, and thus we
         * consider it a bare ACK.
         *
         * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
         * this bare ACK. Otherwise, we create an established connection.
         * Both ends (listening sockets) accept the new incoming
         * connection and try to talk to each other. 8-)
         *
         * Note: This case is both harmless and rare. The possibility is
         * about the same as us discovering intelligent life on another
         * planet tomorrow.
         *
         * But generally, we should (RFC lies!) accept an ACK from a
         * SYNACK both here and in tcp_rcv_state_process().
         * tcp_rcv_state_process() does not, hence, we do not too.
         *
         * Note that the case is absolutely generic: we cannot optimize
         * anything here without violating the protocol. All the checks
         * must be made before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by the listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be such a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too early or too late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                 * at tcp_rsk(req)->rcv_isn + 1.
                 */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set. If ACK is not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
         * ESTABLISHED STATE. If it will be dropped after
         * socket is created, wait for troubles.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (!child)
                goto listen_overflow;

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary to
                 * avoid becoming vulnerable to outside attack aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
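
/* Aside on the SYN-ACK retransmission timer above: the deadline doubles
 * with each retransmission (TCP_TIMEOUT_INIT << num_timeout) and is
 * capped at TCP_RTO_MAX. A standalone sketch (not built here), with an
 * assumed 1 s initial timeout and 120 s cap standing in for those
 * constants:
 */
#if 0
#include <stdio.h>

#define TIMEOUT_INIT    1       /* assumed: 1 s initial timeout */
#define RTO_MAX         120     /* assumed: 120 s cap */

int main(void)
{
        for (int num_timeout = 0; num_timeout < 10; num_timeout++) {
                int t = TIMEOUT_INIT << num_timeout;

                if (t > RTO_MAX)
                        t = RTO_MAX;
                printf("retry %d fires after %d s\n", num_timeout, t);
        }
        return 0;
}
#endif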

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        /* record NAPI ID of child */
        sk_mark_napi_id(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do lookup
                 * in the main socket hash table and the lock on the
                 * listening socket does not protect us more.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);
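
/* For context, a condensed sketch of the caller contract, modelled on the
 * TCP_NEW_SYN_RECV path in tcp_v4_rcv(): the child returned by
 * tcp_check_req() is already locked and referenced (via
 * inet_csk_clone_lock()), and tcp_child_process() releases both with
 * bh_unlock_sock() and sock_put(). Variables are as in tcp_v4_rcv().
 */
#if 0
        nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
        if (nsk && nsk != sk) {
                if (tcp_child_process(sk, nsk, skb)) {
                        /* processing failed: answer with a reset */
                        tcp_v4_send_reset(nsk, skb);
                }
        }
#endif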