/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

/* Check whether [seq, end_seq) overlaps the receive window
 * [s_win, e_win). A segment starting exactly at the left window edge is
 * always acceptable, as is a zero-length segment sitting exactly at the
 * right edge.
 */
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
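/* Illustrative example (not from the original source): with a receive
 * window [s_win, e_win) = [100, 1100), tcp_in_window() accepts
 *
 *	seq = 100,  end_seq = 100   bare ACK at the left edge (first test)
 *	seq =  90,  end_seq = 150   data overlapping the window (second test)
 *	seq = 1100, end_seq = 1100  empty segment at the right edge (last test)
 *
 * and rejects e.g. seq = end_seq = 50, which lies entirely to the left.
 */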
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check (that the SYN carries
	 * a newer sequence number) only works at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable, and moreover
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state. It
	 * is not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		/* Place the new ISN above the old connection's snd_nxt
		 * plus the maximal unscaled window.
		 */
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
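/* How callers consume the status returned above: the IPv4 receive path
 * dispatches roughly like this (a sketch of the do_time_wait handling in
 * net/ipv4/tcp_ipv4.c, abbreviated here for illustration):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	find a listener and restart the handshake
 *				using the precomputed tcp_tw_isn
 *	case TCP_TW_ACK:	answer with a (duplicate) ACK
 *	case TCP_TW_RST:	answer with a reset
 *	case TCP_TW_SUCCESS:	nothing to send, the segment was consumed
 *	}
 */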
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
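/* Worked example for the timeout selection above: (icsk_rto << 2) -
 * (icsk_rto >> 1) is 4*RTO - RTO/2 = 3.5*RTO, so with icsk_rto = 200ms
 * the caller-supplied timeo is raised to at least 700ms. For a true
 * TCP_TIME_WAIT the timeout is then forced to TCP_TIMEWAIT_LEN (60s);
 * only a dead FIN-WAIT-2 socket keeps the shorter max(timeo, 3.5*RTO)
 * schedule, while tw_timeout itself always stays TCP_TIMEWAIT_LEN.
 */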
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
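/* Illustrative arithmetic for the clamp above, assuming the
 * BPF_SOCK_OPS_RWND_INIT hook (reached via tcp_rwnd_init_bpf()) returned
 * rcv_wnd = 40 and that the units are segments, as with the route's
 * initrwnd metric: with mss = 1460 the buffer estimate full_space is
 * raised to at least 40 * 1460 = 58400 bytes, so that
 * tcp_select_initial_window() can actually offer the requested window.
 */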
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice was made yet, assign the current system
	 * default ca.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
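/* The selection order above, informally: a congestion control pinned on
 * the route wins (e.g. "ip route add 10.0.0.0/8 via 192.0.2.1 congctl
 * dctcp", which stores the algorithm in the RTAX_CC_ALGO metric), then a
 * module the listener selected via setsockopt(TCP_CONGESTION), and only
 * then the system default from net.ipv4.tcp_congestion_control.
 */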
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of the memory writes here. tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		newtp->rx_opt.sack_ok = ireq->sack_ok;
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;
		newtp->rack.reo_wnd_steps = 1;
		newtp->rack.last_delivered = 0;
		newtp->rack.reo_wnd_persist = 0;
		newtp->rack.dsack_seen = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
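/* Notes on the defaults above (derived values, for orientation):
 * TCP_INIT_CWND is 10 segments per RFC 6928, and with timestamps
 * negotiated the header length becomes sizeof(struct tcphdr) +
 * TCPOLEN_TSTAMP_ALIGNED = 20 + 12 = 32 bytes; an MD5-signed connection
 * adds another TCPOLEN_MD5SIG_ALIGNED = 20 bytes.
 */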
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
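	/* Illustrative timing for the SYN-ACK retransmission above,
	 * assuming TCP_TIMEOUT_INIT = 1s and TCP_RTO_MAX = 120s: a request
	 * that has already hit num_timeout = 2 gets its rsk_timer pushed
	 * out by min(1s << 2, 120s) = 4s. The whole path is additionally
	 * subject to tcp_oow_rate_limited(), i.e. to the
	 * net.ipv4.tcp_invalid_ratelimit sysctl (default 500ms).
	 */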
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it misbehaves only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK. Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming connection
	   and try to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The probability is
	   about the same as that of us discovering intelligent life on
	   another planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
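	/* TCP_DEFER_ACCEPT, consulted just above, is enabled from userspace
	 * roughly like this (illustrative usage, not kernel code):
	 *
	 *	int secs = 5;
	 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 *
	 * The seconds value is converted into a retransmission count kept
	 * in rskq_defer_accept; bare ACKs are dropped until data arrives
	 * or the request finally times out.
	 */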
	/* OK, the ACK is valid, create a big socket and feed this
	 * segment to it. It will repeat all the tests. THIS SEGMENT
	 * MUST MOVE THE SOCKET TO ESTABLISHED STATE. If it gets
	 * dropped after the socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is busy
 * (owned by user context); otherwise we just short-circuit this and
 * process it on the new socket directly.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);