/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
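
/* A minimal sketch of how tcp_in_window() above maps onto the RFC 793
 * acceptability test: a zero-length segment is acceptable only when it
 * sits exactly on an edge of the receive window, while a data-bearing
 * segment only needs to overlap the window.  The values are hypothetical
 * and the block is illustration only, kept out of the build.
 */
#if 0
static void tcp_in_window_examples(void)
{
	u32 s_win = 1000, e_win = 2000;		/* window is [1000, 2000) */

	/* Zero-length ACK sitting on the left edge: accepted. */
	WARN_ON(!tcp_in_window(1000, 1000, s_win, e_win));
	/* Data overlapping the window: accepted. */
	WARN_ON(!tcp_in_window(900, 1100, s_win, e_win));
	/* Entirely to the left of the window: rejected. */
	WARN_ON(tcp_in_window(500, 900, s_win, e_win));
	/* Zero-length segment on the right edge: accepted. */
	WARN_ON(!tcp_in_window(2000, 2000, s_win, e_win));
}
#endif
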
/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment; it may only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All the segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check (that it carries a
	 * newer sequence number) only works at rates < 40Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state. It
	 * is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
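
/* The TIME-WAIT reopen test above decides "newer" with
 * (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0.  A minimal sketch
 * of that wraparound-safe comparison, with hypothetical values; it is
 * the same signed-difference trick after()/before() use for sequence
 * numbers.  Illustration only, kept out of the build.
 */
#if 0
static bool ts_is_newer(u32 old_ts, u32 new_ts)
{
	/* True when new_ts is ahead of old_ts, modulo 2^32. */
	return (s32)(old_ts - new_ts) < 0;
}

/* ts_is_newer(100, 200)         -> true  (plainly newer)
 * ts_is_newer(0xfffffff0, 0x10) -> true  (newer across the wrap)
 * ts_is_newer(200, 100)         -> false
 */
#endif
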
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
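
/* tcp_time_wait() floors the FIN-WAIT-2 timeout at
 * (icsk_rto << 2) - (icsk_rto >> 1), i.e. 4*RTO - RTO/2 = 3.5*RTO,
 * using shifts instead of a multiply.  A minimal sketch with a
 * hypothetical RTO; illustration only, kept out of the build.
 */
#if 0
static int tw_timeout_floor(int rto)
{
	/* e.g. rto = 200 jiffies  ->  800 - 100 = 700 jiffies */
	return (rto << 2) - (rto >> 1);
}
#endif
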
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
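
/* tcp_openreq_init_rwin() ends up choosing rcv_wscale because the window
 * field on the wire is only 16 bits: with window scaling (RFC 7323) each
 * side advertises wnd >> wscale and the peer reconstructs the byte count
 * as field << wscale.  A minimal sketch with hypothetical numbers;
 * illustration only, kept out of the build.
 */
#if 0
static u32 effective_window(u16 wire_window, u8 snd_wscale)
{
	/* e.g. 1000 << 7 = 128000 bytes from a 16-bit field */
	return (u32)wire_window << snd_wscale;
}
#endif
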
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default congestion-control algorithm.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
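
/* The ownership rule in tcp_ca_openreq_child(): a congestion-control ops
 * table may only be published in icsk_ca_ops while a module reference is
 * held, and the lookup itself happens under RCU.  A minimal sketch of
 * that pattern (error handling elided); illustration only, kept out of
 * the build.
 */
#if 0
	rcu_read_lock();
	ca = tcp_ca_find_key(ca_key);		/* look up under RCU */
	if (ca && try_module_get(ca->owner))	/* pin the module... */
		icsk->icsk_ca_ops = ca;		/* ...before publishing it */
	rcu_read_unlock();
	/* A matching module_put() is owed when the socket drops the ops. */
#endif
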
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}
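
/* smc_check_reset_syn_req() relies on a static key: when SMC is not in
 * use, static_branch_unlikely() patches the test down to a straight-line
 * jump, so the check costs nothing on the fast path.  A minimal sketch of
 * the pattern with a hypothetical key and helper; illustration only, kept
 * out of the build.
 */
#if 0
DEFINE_STATIC_KEY_FALSE(feature_key);

static void feature_hook(void)
{
	if (static_branch_unlikely(&feature_key))
		do_feature_work();	/* hypothetical slow-path helper */
}

/* static_branch_enable(&feature_key) flips the patched jump at runtime. */
#endif
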
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		newtp->rx_opt.sack_ok = ireq->sack_ok;
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;
		newtp->rack.reo_wnd_steps = 1;
		newtp->rack.last_delivered = 0;
		newtp->rack.reo_wnd_persist = 0;
		newtp->rack.dsack_seen = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
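
/* tcp_create_openreq_child() initializes rcv_nxt and snd_una to ISN + 1
 * because SYN (like FIN) consumes one unit of sequence space; after the
 * handshake both directions sit one past their ISNs.  A minimal sketch
 * with hypothetical ISNs; illustration only, kept out of the build.
 */
#if 0
	u32 rcv_isn = 1000;		/* peer's ISN, from the SYN */
	u32 snt_isn = 5000;		/* our ISN, from the SYN-ACK */
	u32 rcv_nxt = rcv_isn + 1;	/* next byte expected: 1001 */
	u32 snd_una = snt_isn + 1;	/* first unacked byte: 5001 */
#endif
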
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot trust it and should rely only on
		 * common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* What follows reproduces the section SEGMENT ARRIVES
	   for the SYN-RECEIVED state of RFC793. It is broken, however:
	   the only case in which it does not work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (RFC lies!) accept an ACK of our SYNACK
	   both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, neither do we.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
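
/* The bare-ACK drop in tcp_check_req() is what TCP_DEFER_ACCEPT buys
 * userspace: the final handshake ACK is ignored and the connection is
 * surfaced to accept() only once data arrives or the timeout expires.
 * A minimal sketch of the userspace side, assuming a listening socket
 * listen_fd; illustration only, kept out of the build.
 */
#if 0
	int secs = 5;	/* wait up to ~5s for the first data segment */

	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		   &secs, sizeof(secs));
#endif
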
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
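
/* The ownership test in tcp_child_process(): a socket that is
 * "owned by user" is locked by a process context, so softirq input must
 * queue the skb on the backlog instead of touching connection state.
 * A minimal sketch of the shape of that pattern (the lock is actually
 * taken by our caller); illustration only, kept out of the build.
 */
#if 0
	bh_lock_sock(child);
	if (!sock_owned_by_user(child))
		ret = tcp_rcv_state_process(child, skb);	/* run inline */
	else
		__sk_add_backlog(child, skb);	/* owner runs it on release */
	bh_unlock_sock(child);
#endif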