tcp_output.c: diff from af9cc93c0dee5fc1f9fa32cd9d79a456738a21be (old) to 90bbcc608369a1b46089b0f5aa22b8ea31ffa12e (new)
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Authors: Ross Biro

--- 1109 unchanged lines hidden ---

1118 if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
1119 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1120 struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1121 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1122
1123 shinfo->tx_flags &= ~tsflags;
1124 shinfo2->tx_flags |= tsflags;
1125 swap(shinfo->tskey, shinfo2->tskey);
1126 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1127 TCP_SKB_CB(skb)->txstamp_ack = 0;
1126 }
1127}
1128
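The two added lines above complete the timestamp handoff when a tracked skb is split: if the byte whose send/ACK timestamps were requested (shinfo->tskey) falls into the second fragment, the tx_flags and tskey already move there, and now the txstamp_ack bit follows as well so the ACK timestamp is reported against the right segment. A minimal standalone sketch of that decision, using simplified stand-in types rather than the kernel's skb_shared_info/tcp_skb_cb:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the per-skb timestamp state. */
struct ts_state {
	uint8_t  tx_flags;     /* SKBTX_* timestamp request bits    */
	uint32_t tskey;        /* sequence number being tracked     */
	bool     txstamp_ack;  /* also want a timestamp when ACKed  */
};

/* true if a precedes b in 32-bit sequence space */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* head keeps [head_seq, split_seq); tail starts at split_seq.
 * Move the tracking state to the tail fragment when the tracked
 * byte is at or beyond the split point, as the hunk above does.
 */
static void fragment_tstamp(struct ts_state *head, struct ts_state *tail,
			    uint32_t split_seq)
{
	if (head->tx_flags && !seq_before(head->tskey, split_seq)) {
		tail->tx_flags |= head->tx_flags;
		head->tx_flags = 0;
		tail->tskey = head->tskey;
		tail->txstamp_ack = head->txstamp_ack;
		head->txstamp_ack = false;
	}
}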
1129/* Function to create two new TCP segments. Shrinks the given segment
1130 * to the specified size and appends a new segment with the rest of the
1131 * packet to the list. This won't be called frequently, I hope.
1132 * Remember, these are still headerless SKBs at this point.
1133 */

--- 1127 unchanged lines hidden ---

2261 GFP_ATOMIC)))
2262 goto rearm_timer;
2263 skb = tcp_write_queue_next(sk, skb);
2264 }
2265
2266 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2267 goto rearm_timer;
2268
2269 if (__tcp_retransmit_skb(sk, skb))
2271 if (__tcp_retransmit_skb(sk, skb, 1))
2270 goto rearm_timer;
2271
2272 /* Record snd_nxt for loss detection. */
2273 tp->tlp_high_seq = tp->snd_nxt;
2274
2275probe_sent:
2276 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2277 /* Reset s.t. tcp_rearm_rto will restart timer from now */

--- 169 unchanged lines hidden ---

2447 const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
2448 u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2449
2450 if (unlikely(tsflags)) {
2451 struct skb_shared_info *shinfo = skb_shinfo(skb);
2452
2453 shinfo->tx_flags |= tsflags;
2454 shinfo->tskey = next_shinfo->tskey;
2457 TCP_SKB_CB(skb)->txstamp_ack |=
2458 TCP_SKB_CB(next_skb)->txstamp_ack;
2455 }
2456}
2457
2458/* Collapses two adjacent SKB's during retransmission. */
2459static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2460{
2461 struct tcp_sock *tp = tcp_sk(sk);
2462 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);

--- 99 unchanged lines hidden ---

2562 tcp_collapse_retrans(sk, to);
2563 }
2564}
2565
2566/* This retransmits one SKB. Policy decisions and retransmit queue
2567 * state updates are done by the caller. Returns non-zero if an
2568 * error occurred which prevented the send.
2569 */
2570int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2574int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2571{
2572 struct tcp_sock *tp = tcp_sk(sk);
2573 struct inet_connection_sock *icsk = inet_csk(sk);
2576 struct inet_connection_sock *icsk = inet_csk(sk);
2577 struct tcp_sock *tp = tcp_sk(sk);
2574 unsigned int cur_mss;
2575 int err;
2579 int diff, len, err;
2576
2577 /* Inconslusive MTU probe */
2578 if (icsk->icsk_mtup.probe_size) {
2581
2582 /* Inconclusive MTU probe */
2583 if (icsk->icsk_mtup.probe_size)
2579 icsk->icsk_mtup.probe_size = 0;
2580 }
2581
2582 /* Do not sent more than we queued. 1/4 is reserved for possible
2583 * copying overhead: fragmentation, tunneling, mangling etc.
2584 */
2585 if (atomic_read(&sk->sk_wmem_alloc) >
2586 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2587 return -EAGAIN;
2588
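The -EAGAIN test above keeps retransmits from ballooning socket memory: sk_wmem_alloc counts every byte actually allocated (including clones still held by lower layers), so it is allowed to exceed the logically queued sk_wmem_queued by a quarter, but never the send buffer itself. The same limit as a standalone predicate, with the socket fields passed in as plain integers:

#include <stdbool.h>
#include <stdint.h>

/* Retransmit is allowed only while allocated write memory stays
 * within min(queued + queued/4, sndbuf); the extra quarter absorbs
 * fragmentation, tunneling and mangling overhead on clones.
 */
static bool retrans_mem_ok(uint32_t wmem_alloc, uint32_t wmem_queued,
			   uint32_t sndbuf)
{
	uint32_t limit = wmem_queued + (wmem_queued >> 2);

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc <= limit;
}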

--- 16 unchanged lines hidden ---

2605 * new window, do not retransmit it. The exception is the
2606 * case, when window is shrunk to zero. In this case
2607 * our retransmit serves as a zero window probe.
2608 */
2609 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2610 TCP_SKB_CB(skb)->seq != tp->snd_una)
2611 return -EAGAIN;
2612
2613 if (skb->len > cur_mss) {
2614 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
2617 len = cur_mss * segs;
2618 if (skb->len > len) {
2619 if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC))
2615 return -ENOMEM; /* We'll try again later. */
2616 } else {
2617 int oldpcount = tcp_skb_pcount(skb);
2622 if (skb_unclone(skb, GFP_ATOMIC))
2623 return -ENOMEM;
2618
2619 if (unlikely(oldpcount > 1)) {
2620 if (skb_unclone(skb, GFP_ATOMIC))
2621 return -ENOMEM;
2622 tcp_init_tso_segs(skb, cur_mss);
2623 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2624 }
2625 diff = tcp_skb_pcount(skb);
2626 tcp_set_skb_tso_segs(skb, cur_mss);
2627 diff -= tcp_skb_pcount(skb);
2628 if (diff)
2629 tcp_adjust_pcount(sk, skb, diff);
2630 if (skb->len < cur_mss)
2631 tcp_retrans_try_collapse(sk, skb, cur_mss);
2625 }
2626
2627 /* RFC3168, section 6.1.1.1. ECN fallback */
2628 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
2629 tcp_ecn_clear_syn(sk, skb);
2630
2631 tcp_retrans_try_collapse(sk, skb, cur_mss);
2632
2633 /* Make a copy, if the first transmission SKB clone we made
2634 * is still in somebody's hands, else make a clone.
2635 */
2636
2637 /* make sure skb->data is aligned on arches that require it
2638 * and check if ack-trimming & collapsing extended the headroom
2639 * beyond what csum_start can cover.
2640 */
2641 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2642 skb_headroom(skb) >= 0xFFFF)) {
2643 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2644 GFP_ATOMIC);
2645 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2646 -ENOBUFS;
2647 } else {
2648 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2649 }
2650
2651 if (likely(!err)) {
2653 segs = tcp_skb_pcount(skb);
2654
2652 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2653 /* Update global TCP statistics. */
2654 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2657 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2655 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2656 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2657 tp->total_retrans++;
2660 tp->total_retrans += segs;
2658 }
2659 return err;
2660}
2661
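The reworked body above is the core of this diff: __tcp_retransmit_skb() now takes a segment budget. A TSO skb longer than cur_mss * segs is split at that boundary and only the head is retransmitted, instead of always chopping down to a single MSS; afterwards tcp_set_skb_tso_segs() refreshes the GSO count, any change is fed back through tcp_adjust_pcount(), and the retransmit counters are bumped by the number of segments actually sent. A worked example of the split arithmetic (numbers are illustrative, not taken from the diff):

#include <stdio.h>

int main(void)
{
	unsigned int cur_mss = 1448;	/* current MSS                    */
	int segs = 3;			/* budget passed in by the caller */
	unsigned int skb_len = 14480;	/* a 10-segment TSO skb           */

	unsigned int len = cur_mss * segs;	/* 4344 bytes may go out */

	if (skb_len > len) {
		/* head is retransmitted now, tail stays on the queue */
		printf("split at %u: head %u segs, tail %u bytes\n",
		       len, len / cur_mss, skb_len - len);
	} else {
		printf("whole skb (%u bytes) fits the budget\n", skb_len);
	}
	return 0;
}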
2662int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2665int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2663{
2664 struct tcp_sock *tp = tcp_sk(sk);
2665 int err = __tcp_retransmit_skb(sk, skb);
2668 int err = __tcp_retransmit_skb(sk, skb, segs);
2666
2667 if (err == 0) {
2668#if FASTRETRANS_DEBUG > 0
2669 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2670 net_dbg_ratelimited("retrans_out leaked\n");
2671 }
2672#endif
2673 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;

--- 74 unchanged lines hidden ---

2748 last_lost = tp->retransmit_high;
2749 } else {
2750 skb = tcp_write_queue_head(sk);
2751 last_lost = tp->snd_una;
2752 }
2753
2754 tcp_for_write_queue_from(skb, sk) {
2755 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2759 int segs;
2756
2757 if (skb == tcp_send_head(sk))
2758 break;
2759 /* we could do better than to assign each time */
2760 if (!hole)
2761 tp->retransmit_skb_hint = skb;
2762
2763 /* Assume this retransmit will generate
2764 * only one packet for congestion window
2765 * calculation purposes. This works because
2766 * tcp_retransmit_skb() will chop up the
2767 * packet to be MSS sized and all the
2768 * packet counting works out.
2769 */
2770 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2767 segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
2768 if (segs <= 0)
2771 return;
2772
2773 if (fwd_rexmitting) {
2774begin_fwd:
2775 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2776 break;
2777 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2778
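With the per-call budget in place, tcp_xmit_retransmit_queue() above no longer assumes each retransmit emits exactly one packet: before touching an skb it computes how many segments the congestion window still allows and hands that to tcp_retransmit_skb(), bailing out when nothing fits. A minimal model of that budget check (illustrative only):

/* Segments the congestion window still allows in flight; a result
 * of zero or less stops the retransmit loop, matching the
 * "if (segs <= 0) return;" added above.
 */
static int retransmit_budget(unsigned int snd_cwnd,
			     unsigned int packets_in_flight)
{
	return (int)snd_cwnd - (int)packets_in_flight;
}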

--- 20 unchanged lines hidden ---

2799 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2800 else
2801 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2802 }
2803
2804 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2805 continue;
2806
2807 if (tcp_retransmit_skb(sk, skb))
2805 if (tcp_retransmit_skb(sk, skb, segs))
2808 return;
2809
2810 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2811
2812 if (tcp_in_cwnd_reduction(sk))
2813 tp->prr_out += tcp_skb_pcount(skb);
2814
2815 if (skb == tcp_write_queue_head(sk))

--- 139 unchanged lines hidden ---

2955 * req: request_sock pointer
2956 *
2957 * Allocate one skb and build a SYNACK packet.
2958 * @dst is consumed : Caller should not use it again.
2959 */
2960struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
2961 struct request_sock *req,
2962 struct tcp_fastopen_cookie *foc,
2963 bool attach_req)
2961 enum tcp_synack_type synack_type)
2964{
2965 struct inet_request_sock *ireq = inet_rsk(req);
2966 const struct tcp_sock *tp = tcp_sk(sk);
2967 struct tcp_md5sig_key *md5 = NULL;
2968 struct tcp_out_options opts;
2969 struct sk_buff *skb;
2970 int tcp_header_size;
2971 struct tcphdr *th;
2972 u16 user_mss;
2973 int mss;
2974
2975 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2976 if (unlikely(!skb)) {
2977 dst_release(dst);
2978 return NULL;
2979 }
2980 /* Reserve space for headers. */
2981 skb_reserve(skb, MAX_TCP_HEADER);
2982
2983 if (attach_req) {
2981 switch (synack_type) {
2982 case TCP_SYNACK_NORMAL:
2984 skb_set_owner_w(skb, req_to_sk(req));
2985 } else {
2984 break;
2985 case TCP_SYNACK_COOKIE:
2986 /* Under synflood, we do not attach skb to a socket,
2987 * to avoid false sharing.
2988 */
2989 break;
2990 case TCP_SYNACK_FASTOPEN:
2986 /* sk is a const pointer, because we want to express multiple
2987 * cpu might call us concurrently.
2988 * sk->sk_wmem_alloc in an atomic, we can promote to rw.
2989 */
2990 skb_set_owner_w(skb, (struct sock *)sk);
2996 break;
2991 }
2992 skb_dst_set(skb, dst);
2993
2994 mss = dst_metric_advmss(dst);
2995 user_mss = READ_ONCE(tp->rx_opt.user_mss);
2996 if (user_mss && user_mss < mss)
2997 mss = user_mss;
2998
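The switch above replaces the old attach_req flag with a three-way choice: a normal SYNACK is owned by the request socket, a syncookie reply is deliberately left unattached to avoid false sharing under a synflood, and a Fast Open SYNACK is charged to the (const) listener socket. The enum behind it is declared elsewhere in the TCP headers; a sketch matching the values used in this hunk:

/* Sketch of the SYNACK flavours dispatched above; the real
 * declaration lives in the kernel's TCP headers.
 */
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,	/* attach skb to the request socket      */
	TCP_SYNACK_COOKIE,	/* synflood path: leave skb unattached   */
	TCP_SYNACK_FASTOPEN,	/* Fast Open: charge the listener socket */
};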

--- 32 unchanged lines hidden ---

3031 th->seq = htonl(TCP_SKB_CB(skb)->seq);
3032 /* XXX data is queued and acked as is. No buffer/window check */
3033 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3034
3035 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3036 th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3037 tcp_options_write((__be32 *)(th + 1), NULL, &opts);
3038 th->doff = (tcp_header_size >> 2);
3039 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
3045 __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3040
3041#ifdef CONFIG_TCP_MD5SIG
3042 /* Okay, we have all we need - do the md5 hash if needed */
3043 if (md5)
3044 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3045 md5, req_to_sk(req), skb);
3046 rcu_read_unlock();
3047#endif

--- 479 unchanged lines hidden ---

3527
3528int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
3529{
3530 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
3531 struct flowi fl;
3532 int res;
3533
3534 tcp_rsk(req)->txhash = net_tx_rndhash();
3535 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
3541 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
3536 if (!res) {
3537 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
3543 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
3538 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3539 }
3540 return res;
3541}
3542EXPORT_SYMBOL(tcp_rtx_synack);