tcp_minisocks.c (7731b8bc94e599c9a79e428f3359ff2c34b7576a), before:

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro

--- 435 unchanged lines hidden ---

 * Actually, we could lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		newtp->rx_opt.sack_ok = ireq->sack_ok;
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;
		newtp->rack.reo_wnd_steps = 1;
		newtp->rack.last_delivered = 0;
		newtp->rack.reo_wnd_persist = 0;
		newtp->rack.dsack_seen = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.

--- 285 unchanged lines hidden ---

tcp_minisocks.c (242b1bbe5144de3577ad12da058e70ef88167146), after:

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro

--- 435 unchanged lines hidden ---

 * Actually, we could lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	newtp->rcv_wup = newtp->copied_seq =
	newtp->rcv_nxt = treq->rcv_isn + 1;
	newtp->segs_in = 1;

	newtp->snd_sml = newtp->snd_una =
	newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	newtp->srtt_us = 0;
	newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_rto = TCP_TIMEOUT_INIT;
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->packets_out = 0;
	newtp->retrans_out = 0;
	newtp->sacked_out = 0;
	newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	newtp->tlp_high_seq = 0;
	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->last_oow_ack_time = 0;
	newtp->total_retrans = req->num_retrans;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	newtp->snd_cwnd = TCP_INIT_CWND;
	newtp->snd_cwnd_cnt = 0;

	/* There's a bubble in the pipe until at least the first ACK. */
	newtp->app_limited = ~0U;

	tcp_init_xmit_timers(newsk);
	newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

	newtp->rx_opt.saw_tstamp = 0;

	newtp->rx_opt.dsack = 0;
	newtp->rx_opt.num_sacks = 0;

	newtp->urg_data = 0;

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	newtp->fastopen_rsk = NULL;
	newtp->syn_data_acked = 0;
	newtp->rack.mstamp = 0;
	newtp->rack.advanced = 0;
	newtp->rack.reo_wnd_steps = 1;
	newtp->rack.last_delivered = 0;
	newtp->rack.reo_wnd_persist = 0;
	newtp->rack.dsack_seen = 0;

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.

--- 285 unchanged lines hidden ---
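The only behavioral-neutral change between the two revisions is a guard-clause refactor: instead of nesting the entire success path inside "if (newsk) { ... }", the new code bails out early with "if (!newsk) return NULL;" and runs the long initialization sequence at one indentation level less. Note that the hoisted initializers ireq and treq only cast req, which is always valid here, while the pointers derived from newsk (newicsk, newtp) are assigned only after the NULL check. The toy allocator below is a minimal standalone sketch of the same transformation; the names make_widget_nested, make_widget_flat, and struct widget are hypothetical and not from the kernel:

#include <stdio.h>
#include <stdlib.h>

struct widget { int id; };

/* Before: the success path is nested inside "if (w)", so every
 * initialization line carries an extra indentation level. */
struct widget *make_widget_nested(int id)
{
	struct widget *w = malloc(sizeof(*w));

	if (w) {
		w->id = id;
	}
	return w;
}

/* After: fail fast with an early return, mirroring the commit's
 * "if (!newsk) return NULL;", and keep the success path flat. */
struct widget *make_widget_flat(int id)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return NULL;

	w->id = id;
	return w;
}

int main(void)
{
	struct widget *w = make_widget_flat(42);

	if (w)
		printf("widget %d\n", w->id);
	free(w);
	return 0;
}

The payoff grows with the size of the guarded block: in tcp_create_openreq_child() the body is over a hundred lines, so removing one indentation level keeps multi-line statements such as the keepalive-timer call from wrapping awkwardly.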
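Both revisions compute the child's initial send window the same way: the raw 16-bit window field from the handshake-completing ACK is left-shifted by the peer's advertised scale factor, per RFC 7323 window scaling. The snippet below is a hedged, self-contained illustration of that arithmetic; effective_send_window is a hypothetical helper, not a kernel function, and it takes the window in host byte order for simplicity (the kernel converts with ntohs() first):

#include <stdio.h>
#include <stdint.h>

/* Mirrors "newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) <<
 * newtp->rx_opt.snd_wscale". snd_wscale is 0 when the peer did not
 * negotiate window scaling, and at most 14 per RFC 7323. */
static uint32_t effective_send_window(uint16_t raw_window,
				      unsigned int snd_wscale)
{
	return (uint32_t)raw_window << snd_wscale;
}

int main(void)
{
	/* Without scaling, the advertised window caps at 65535 bytes. */
	printf("%u\n", effective_send_window(65535, 0));  /* 65535 */
	/* With wscale = 7, the same 16-bit field covers ~8 MB. */
	printf("%u\n", effective_send_window(65535, 7));  /* 8388480 */
	return 0;
}

This is also why the non-wscale branch clamps window_clamp to 65535U: without the scaling option, no window larger than the bare 16-bit field can ever be advertised.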