/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering off their
 *   path.  That is not quite correct.  This timeout is calculated so that
 *   it exceeds the maximal retransmission timeout by enough to allow for
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  Strictly speaking, this means we ought to
 * spinlock it.  I do not want to!  The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
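 *
 * (Illustrative arithmetic, not from any RFC: tcp_time_wait() below
 * floors the FIN-WAIT-2 timeout at (rto << 2) - (rto >> 1), i.e.
 * 4*RTO - RTO/2 = 3.5*RTO.  With RTO = 200ms that is a 700ms floor,
 * enough to cover a retransmitted peer FIN plus one lost ACK of ours.)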
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN.
	   We accept it if it is not an old duplicate and we are not in
	   danger of being killed by delayed old duplicates.  The RFC
	   check - that it carries a newer sequence number - works at
	   rates <40Mbit/sec.  However, if PAWS works, it is reliable
	   AND, even more, we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN
	   turns out to be an old duplicate (i.e. we receive a RST in
	   reply to our SYN-ACK), we must return the socket to
	   time-wait state.  It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure.  We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
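		/* (Descriptive note, not in the original source: timeo is
		 * the caller's FIN-WAIT-2 timeout, floored at rto = 3.5*RTO
		 * computed above, while a socket entering true TIME-WAIT
		 * always sleeps the full TCP_TIMEWAIT_LEN, 60 seconds in
		 * this tree.)
		 */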
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

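	/* (Descriptive note, not in the original source: the child cloned
	 * the listener's icsk_ca_ops pointer.  Unless the route pinned an
	 * algorithm above, we keep the inherited one only if the user
	 * selected it explicitly via setsockopt and its module can still
	 * be pinned; otherwise the system default is assigned below.)
	 */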
	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here.  tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count
		 * the initial SYN frame in their delayed-ACK and congestion
		 * control algorithms that we must have the following bandaid
		 * to talk efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
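		/* (Aside, not in the original source: TCP_INIT_CWND is 10
		 * segments in trees of this vintage, per the IW10 increase
		 * of RFC 6928.)
		 */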
		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock.  Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation here and another inside tcp_v4_reqsk_send_ack().
 * Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
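			/* (Worked example, illustrative only: TCP_TIMEOUT_INIT
			 * is 1*HZ, so after req->num_timeout = 2 SYN-ACK
			 * retransmissions the estimate below is
			 * now - (1 << 2) = 4s ago, close to the 1s + 2s the
			 * exponential backoff actually took.)
			 */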
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state.  All the description
		 * is wrong, we cannot trust it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	   this bare ACK.  Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming connection
	   and try to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol.  All the checks must be made
	   before an attempt to create a socket.
	 */
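
	/* (Worked example, illustrative only: if we sent a SYN-ACK with
	 * snt_isn = 1000 and no data, the only acceptable ack_seq from a
	 * non-TFO peer is 1001; any other value trips the check below and
	 * the listening socket answers with a reset.)
	 */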

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create the big socket and feed this
	 * segment to it.  It will repeat all the tests.  THIS
	 * SEGMENT MUST MOVE THE SOCKET TO ESTABLISHED STATE.  If it
	 * gets dropped after the socket is created, expect trouble.
	 */
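	/* (Descriptive note, not in the original source: own_req is set by
	 * syn_recv_sock() to tell us whether this request actually created
	 * and hashed the child, or whether another CPU beat us to it;
	 * inet_csk_complete_hashdance() below uses it to decide whether to
	 * drop the request from the listener and queue the child.)
	 */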
757 */ 758 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 759 req, &own_req); 760 if (!child) 761 goto listen_overflow; 762 763 sock_rps_save_rxhash(child, skb); 764 tcp_synack_rtt_meas(child, req); 765 return inet_csk_complete_hashdance(sk, child, req, own_req); 766 767 listen_overflow: 768 if (!sysctl_tcp_abort_on_overflow) { 769 inet_rsk(req)->acked = 1; 770 return NULL; 771 } 772 773 embryonic_reset: 774 if (!(flg & TCP_FLAG_RST)) { 775 /* Received a bad SYN pkt - for TFO We try not to reset 776 * the local connection unless it's really necessary to 777 * avoid becoming vulnerable to outside attack aiming at 778 * resetting legit local connections. 779 */ 780 req->rsk_ops->send_reset(sk, skb); 781 } else if (fastopen) { /* received a valid RST pkt */ 782 reqsk_fastopen_remove(sk, req, true); 783 tcp_reset(sk); 784 } 785 if (!fastopen) { 786 inet_csk_reqsk_queue_drop(sk, req); 787 __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); 788 } 789 return NULL; 790 } 791 EXPORT_SYMBOL(tcp_check_req); 792 793 /* 794 * Queue segment on the new socket if the new socket is active, 795 * otherwise we just shortcircuit this and continue with 796 * the new socket. 797 * 798 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV 799 * when entering. But other states are possible due to a race condition 800 * where after __inet_lookup_established() fails but before the listener 801 * locked is obtained, other packets cause the same connection to 802 * be created. 803 */ 804 805 int tcp_child_process(struct sock *parent, struct sock *child, 806 struct sk_buff *skb) 807 { 808 int ret = 0; 809 int state = child->sk_state; 810 811 /* record NAPI ID of child */ 812 sk_mark_napi_id(child, skb); 813 814 tcp_segs_in(tcp_sk(child), skb); 815 if (!sock_owned_by_user(child)) { 816 ret = tcp_rcv_state_process(child, skb); 817 /* Wakeup parent, send SIGIO */ 818 if (state == TCP_SYN_RECV && child->sk_state != state) 819 parent->sk_data_ready(parent); 820 } else { 821 /* Alas, it is possible again, because we do lookup 822 * in main socket hash table and lock on listening 823 * socket does not protect us more. 824 */ 825 __sk_add_backlog(child, skb); 826 } 827 828 bh_unlock_sock(child); 829 sock_put(child); 830 return ret; 831 } 832 EXPORT_SYMBOL(tcp_child_process); 833