tcp_input.c (9b610fda0df5d0f0b0c64242e37441ad1b384aac)
tcp_input.c (c1e20f7c8b9ccbafc9ea78f2b406738728ce6b81)
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>

--- 581 unchanged lines hidden (view full) ---

599}
600
601static u32 tcp_rto_min(struct sock *sk)
602{
603 struct dst_entry *dst = __sk_dst_get(sk);
604 u32 rto_min = TCP_RTO_MIN;
605
606 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 * Linus Torvalds, <torvalds@cs.helsinki.fi>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>

--- 581 unchanged lines hidden (view full) ---

597}
598
599static u32 tcp_rto_min(struct sock *sk)
600{
601 struct dst_entry *dst = __sk_dst_get(sk);
602 u32 rto_min = TCP_RTO_MIN;
603
604 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
607 rto_min = dst_metric(dst, RTAX_RTO_MIN);
605 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
608 return rto_min;
609}
610
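The dst_metric() to dst_metric_rtt() switch in tcp_rto_min() above (and the set_dst_metric_rtt() writes further down in tcp_update_metrics()) moves the cached per-route RTT metrics to an HZ-independent unit, converting to and from jiffies at the point of access. A minimal sketch of what the two helpers are assumed to look like; the real definitions presumably sit next to dst_metric() in include/net/dst.h:

	static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
	{
		/* cached value is assumed HZ-independent (e.g. milliseconds);
		 * callers such as tcp_rto_min() work in jiffies */
		return msecs_to_jiffies(dst_metric(dst, metric));
	}

	static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
					      unsigned long rtt)
	{
		dst->metrics[metric - 1] = jiffies_to_msecs(rtt);
	}

The per-route RTAX_RTO_MIN override that tcp_rto_min() honours is normally installed from user space, for example via iproute2's rto_min route attribute.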
611/* Called to compute a smoothed rtt estimate. The data fed to this
612 * routine either comes from timestamps, or from segments that were
613 * known _not_ to have been retransmitted [see Karn/Partridge
614 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
615 * piece by Van Jacobson.

--- 110 unchanged lines hidden (view full) ---

726 if (sysctl_tcp_nometrics_save)
727 return;
728
729 dst_confirm(dst);
730
731 if (dst && (dst->flags & DST_HOST)) {
732 const struct inet_connection_sock *icsk = inet_csk(sk);
733 int m;
606 return rto_min;
607}
608
609/* Called to compute a smoothed rtt estimate. The data fed to this
610 * routine either comes from timestamps, or from segments that were
611 * known _not_ to have been retransmitted [see Karn/Partridge
612 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
613 * piece by Van Jacobson.
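The estimator body itself falls in the hidden region just below, so a rough floating-point rendering of the Van Jacobson algorithm the comment cites may help; the kernel's actual code keeps tp->srtt scaled by 8 and tp->mdev scaled by 4 in fixed point, but the shape of the update is the same:

	#include <math.h>

	/* illustrative sketch only, not the kernel's fixed-point implementation */
	static void rtt_estimator_sketch(double *srtt, double *rttvar, double m)
	{
		if (*srtt == 0.0) {		/* first measurement seeds both terms */
			*srtt = m;
			*rttvar = m / 2.0;
			return;
		}
		*rttvar += (fabs(m - *srtt) - *rttvar) / 4.0;	/* mean deviation EWMA */
		*srtt += (m - *srtt) / 8.0;			/* smoothed RTT EWMA   */
	}

The retransmit timeout is then derived from srtt + 4 * rttvar, with the variance term floored at tcp_rto_min() and the result capped at TCP_RTO_MAX (see tcp_set_rto()/tcp_bound_rto() further down).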

--- 110 unchanged lines hidden (view full) ---

724 if (sysctl_tcp_nometrics_save)
725 return;
726
727 dst_confirm(dst);
728
729 if (dst && (dst->flags & DST_HOST)) {
730 const struct inet_connection_sock *icsk = inet_csk(sk);
731 int m;
732 unsigned long rtt;
734
735 if (icsk->icsk_backoff || !tp->srtt) {
736 /* This session failed to estimate rtt. Why?
737 * Probably, no packets returned in time.
738 * Reset our results.
739 */
740 if (!(dst_metric_locked(dst, RTAX_RTT)))
741 dst->metrics[RTAX_RTT - 1] = 0;
742 return;
743 }
744
733
734 if (icsk->icsk_backoff || !tp->srtt) {
735 /* This session failed to estimate rtt. Why?
736 * Probably, no packets returned in time.
737 * Reset our results.
738 */
739 if (!(dst_metric_locked(dst, RTAX_RTT)))
740 dst->metrics[RTAX_RTT - 1] = 0;
741 return;
742 }
743
745 m = dst_metric(dst, RTAX_RTT) - tp->srtt;
744 rtt = dst_metric_rtt(dst, RTAX_RTT);
745 m = rtt - tp->srtt;
746
747 /* If newly calculated rtt larger than stored one,
748 * store new one. Otherwise, use EWMA. Remember,
749 * rtt overestimation is always better than underestimation.
750 */
751 if (!(dst_metric_locked(dst, RTAX_RTT))) {
752 if (m <= 0)
746
747 /* If newly calculated rtt larger than stored one,
748 * store new one. Otherwise, use EWMA. Remember,
749 * rtt overestimation is always better than underestimation.
750 */
751 if (!(dst_metric_locked(dst, RTAX_RTT))) {
752 if (m <= 0)
753 dst->metrics[RTAX_RTT - 1] = tp->srtt;
753 set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
754 else
754 else
755 dst->metrics[RTAX_RTT - 1] -= (m >> 3);
755 set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
756 }
757
758 if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
756 }
757
758 if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
759 unsigned long var;
759 if (m < 0)
760 m = -m;
761
762 /* Scale deviation to rttvar fixed point */
763 m >>= 1;
764 if (m < tp->mdev)
765 m = tp->mdev;
766
760 if (m < 0)
761 m = -m;
762
763 /* Scale deviation to rttvar fixed point */
764 m >>= 1;
765 if (m < tp->mdev)
766 m = tp->mdev;
767
767 if (m >= dst_metric(dst, RTAX_RTTVAR))
768 dst->metrics[RTAX_RTTVAR - 1] = m;
768 var = dst_metric_rtt(dst, RTAX_RTTVAR);
769 if (m >= var)
770 var = m;
769 else
771 else
770 dst->metrics[RTAX_RTTVAR-1] -=
771 (dst_metric(dst, RTAX_RTTVAR) - m)>>2;
772 var -= (var - m) >> 2;
773
774 set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
772 }
773
774 if (tp->snd_ssthresh >= 0xFFFF) {
775 /* Slow start still did not finish. */
776 if (dst_metric(dst, RTAX_SSTHRESH) &&
777 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
778 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
779 dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;

--- 114 unchanged lines hidden (view full) ---

894 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
895 tcp_disable_fack(tp);
896 tp->reordering = dst_metric(dst, RTAX_REORDERING);
897 }
898
899 if (dst_metric(dst, RTAX_RTT) == 0)
900 goto reset;
901
775 }
776
777 if (tp->snd_ssthresh >= 0xFFFF) {
778 /* Slow start still did not finish. */
779 if (dst_metric(dst, RTAX_SSTHRESH) &&
780 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
781 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
782 dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;

--- 114 unchanged lines hidden (view full) ---

897 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
898 tcp_disable_fack(tp);
899 tp->reordering = dst_metric(dst, RTAX_REORDERING);
900 }
901
902 if (dst_metric(dst, RTAX_RTT) == 0)
903 goto reset;
904
902 if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
905 if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
903 goto reset;
904
905 /* Initial rtt is determined from SYN,SYN-ACK.
906 * The segment is small and rtt may appear much
907 * less than real one. Use per-dst memory
908 * to make it more realistic.
909 *
910 * A bit of theory. RTT is time passed after "normal" sized packet
911 * is sent until it is ACKed. In normal circumstances sending small
912 * packets force peer to delay ACKs and calculation is correct too.
913 * The algorithm is adaptive and, provided we follow specs, it
914 * NEVER underestimate RTT. BUT! If peer tries to make some clever
915 * tricks sort of "quick acks" for time long enough to decrease RTT
916 * to low value, and then abruptly stops to do it and starts to delay
917 * ACKs, wait for troubles.
918 */
906 goto reset;
907
908 /* Initial rtt is determined from SYN,SYN-ACK.
909 * The segment is small and rtt may appear much
910 * less than real one. Use per-dst memory
911 * to make it more realistic.
912 *
913 * A bit of theory. RTT is time passed after "normal" sized packet
914 * is sent until it is ACKed. In normal circumstances sending small
915 * packets force peer to delay ACKs and calculation is correct too.
916 * The algorithm is adaptive and, provided we follow specs, it
917 * NEVER underestimate RTT. BUT! If peer tries to make some clever
918 * tricks sort of "quick acks" for time long enough to decrease RTT
919 * to low value, and then abruptly stops to do it and starts to delay
920 * ACKs, wait for troubles.
921 */
919 if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
920 tp->srtt = dst_metric(dst, RTAX_RTT);
922 if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
923 tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
921 tp->rtt_seq = tp->snd_nxt;
922 }
924 tp->rtt_seq = tp->snd_nxt;
925 }
923 if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
924 tp->mdev = dst_metric(dst, RTAX_RTTVAR);
926 if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
927 tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
925 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
926 }
927 tcp_set_rto(sk);
928 tcp_bound_rto(sk);
929 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
930 goto reset;
931 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
932 tp->snd_cwnd_stamp = tcp_time_stamp;

--- 11 unchanged lines hidden (view full) ---

944 }
945}
946
947static void tcp_update_reordering(struct sock *sk, const int metric,
948 const int ts)
949{
950 struct tcp_sock *tp = tcp_sk(sk);
951 if (metric > tp->reordering) {
928 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
929 }
930 tcp_set_rto(sk);
931 tcp_bound_rto(sk);
932 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
933 goto reset;
934 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
935 tp->snd_cwnd_stamp = tcp_time_stamp;

--- 11 unchanged lines hidden (view full) ---

947 }
948}
949
950static void tcp_update_reordering(struct sock *sk, const int metric,
951 const int ts)
952{
953 struct tcp_sock *tp = tcp_sk(sk);
954 if (metric > tp->reordering) {
955 int mib_idx;
956
952 tp->reordering = min(TCP_MAX_REORDERING, metric);
953
954 /* This exciting event is worth to be remembered. 8) */
955 if (ts)
957 tp->reordering = min(TCP_MAX_REORDERING, metric);
958
959 /* This exciting event is worth to be remembered. 8) */
960 if (ts)
956 NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
961 mib_idx = LINUX_MIB_TCPTSREORDER;
957 else if (tcp_is_reno(tp))
962 else if (tcp_is_reno(tp))
958 NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
963 mib_idx = LINUX_MIB_TCPRENOREORDER;
959 else if (tcp_is_fack(tp))
964 else if (tcp_is_fack(tp))
960 NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
965 mib_idx = LINUX_MIB_TCPFACKREORDER;
961 else
966 else
962 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
967 mib_idx = LINUX_MIB_TCPSACKREORDER;
968
969 NET_INC_STATS_BH(sock_net(sk), mib_idx);
963#if FASTRETRANS_DEBUG > 1
964 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
965 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
966 tp->reordering,
967 tp->fackets_out,
968 tp->sacked_out,
969 tp->undo_marker ? tp->undo_retrans : 0);
970#endif

--- 179 unchanged lines hidden (view full) ---

1150
1151 /* clear lost hint */
1152 tp->retransmit_skb_hint = NULL;
1153
1154 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1155 tp->lost_out += tcp_skb_pcount(skb);
1156 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1157 }
970#if FASTRETRANS_DEBUG > 1
971 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
972 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
973 tp->reordering,
974 tp->fackets_out,
975 tp->sacked_out,
976 tp->undo_marker ? tp->undo_retrans : 0);
977#endif
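Aside from the RTT metric helpers, the change that recurs throughout the rest of this diff is mechanical: NET_INC_STATS_BH() and TCP_INC_STATS_BH() now take the socket's network namespace (sock_net(sk)) as their first argument, so the SNMP/MIB counters are accounted per namespace instead of globally, and the new mib_idx locals are only a refactor that lets each branch pick a counter while a single call site bumps it. Later in the diff, helpers such as tcp_check_dsack(), tcp_dsack_set() and tcp_dsack_extend() switch from taking a struct tcp_sock * to a struct sock * purely so they can reach sock_net(sk). The shape of the namespace-aware macro is assumed to be roughly:

	/* conceptual sketch only; the real definition lives with the other
	 * SNMP helpers and indexes the per-netns, per-cpu mib arrays */
	#define NET_INC_STATS_BH(net, field) \
		SNMP_INC_STATS_BH((net)->mib.net_statistics, field)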

--- 179 unchanged lines hidden (view full) ---

1157
1158 /* clear lost hint */
1159 tp->retransmit_skb_hint = NULL;
1160
1161 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1162 tp->lost_out += tcp_skb_pcount(skb);
1163 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1164 }
1158 NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
1165 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
1159 } else {
1160 if (before(ack_seq, new_low_seq))
1161 new_low_seq = ack_seq;
1162 cnt += tcp_skb_pcount(skb);
1163 }
1164 }
1165
1166 if (tp->retrans_out)
1167 tp->lost_retrans_low = new_low_seq;
1168}
1169
1166 } else {
1167 if (before(ack_seq, new_low_seq))
1168 new_low_seq = ack_seq;
1169 cnt += tcp_skb_pcount(skb);
1170 }
1171 }
1172
1173 if (tp->retrans_out)
1174 tp->lost_retrans_low = new_low_seq;
1175}
1176
1170static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
1177static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
1171 struct tcp_sack_block_wire *sp, int num_sacks,
1172 u32 prior_snd_una)
1173{
1178 struct tcp_sack_block_wire *sp, int num_sacks,
1179 u32 prior_snd_una)
1180{
1181 struct tcp_sock *tp = tcp_sk(sk);
1174 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1175 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1176 int dup_sack = 0;
1177
1178 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1179 dup_sack = 1;
1180 tcp_dsack_seen(tp);
1182 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1183 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1184 int dup_sack = 0;
1185
1186 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1187 dup_sack = 1;
1188 tcp_dsack_seen(tp);
1181 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
1189 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1182 } else if (num_sacks > 1) {
1183 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1184 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1185
1186 if (!after(end_seq_0, end_seq_1) &&
1187 !before(start_seq_0, start_seq_1)) {
1188 dup_sack = 1;
1189 tcp_dsack_seen(tp);
1190 } else if (num_sacks > 1) {
1191 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1192 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1193
1194 if (!after(end_seq_0, end_seq_1) &&
1195 !before(start_seq_0, start_seq_1)) {
1196 dup_sack = 1;
1197 tcp_dsack_seen(tp);
1190 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
1198 NET_INC_STATS_BH(sock_net(sk),
1199 LINUX_MIB_TCPDSACKOFORECV);
1191 }
1192 }
1193
1194 /* D-SACK for already forgotten data... Do dumb counting. */
1195 if (dup_sack &&
1196 !after(end_seq_0, prior_snd_una) &&
1197 after(end_seq_0, tp->undo_marker))
1198 tp->undo_retrans--;

--- 228 unchanged lines hidden (view full) ---

1427 int first_sack_index;
1428
1429 if (!tp->sacked_out) {
1430 if (WARN_ON(tp->fackets_out))
1431 tp->fackets_out = 0;
1432 tcp_highest_sack_reset(sk);
1433 }
1434
1200 }
1201 }
1202
1203 /* D-SACK for already forgotten data... Do dumb counting. */
1204 if (dup_sack &&
1205 !after(end_seq_0, prior_snd_una) &&
1206 after(end_seq_0, tp->undo_marker))
1207 tp->undo_retrans--;

--- 228 unchanged lines hidden (view full) ---

1436 int first_sack_index;
1437
1438 if (!tp->sacked_out) {
1439 if (WARN_ON(tp->fackets_out))
1440 tp->fackets_out = 0;
1441 tcp_highest_sack_reset(sk);
1442 }
1443
1435 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
1444 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
1436 num_sacks, prior_snd_una);
1437 if (found_dup_sack)
1438 flag |= FLAG_DSACKING_ACK;
1439
1440 /* Eliminate too old ACKs, but take into
1441 * account more or less fresh ones, they can
1442 * contain valid SACK info.
1443 */

--- 9 unchanged lines hidden (view full) ---

1453 int dup_sack = !i && found_dup_sack;
1454
1455 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1456 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1457
1458 if (!tcp_is_sackblock_valid(tp, dup_sack,
1459 sp[used_sacks].start_seq,
1460 sp[used_sacks].end_seq)) {
1445 num_sacks, prior_snd_una);
1446 if (found_dup_sack)
1447 flag |= FLAG_DSACKING_ACK;
1448
1449 /* Eliminate too old ACKs, but take into
1450 * account more or less fresh ones, they can
1451 * contain valid SACK info.
1452 */

--- 9 unchanged lines hidden (view full) ---

1462 int dup_sack = !i && found_dup_sack;
1463
1464 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1465 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1466
1467 if (!tcp_is_sackblock_valid(tp, dup_sack,
1468 sp[used_sacks].start_seq,
1469 sp[used_sacks].end_seq)) {
1470 int mib_idx;
1471
1461 if (dup_sack) {
1462 if (!tp->undo_marker)
1472 if (dup_sack) {
1473 if (!tp->undo_marker)
1463 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
1474 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1464 else
1475 else
1465 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
1476 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1466 } else {
1467 /* Don't count olds caused by ACK reordering */
1468 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1469 !after(sp[used_sacks].end_seq, tp->snd_una))
1470 continue;
1477 } else {
1478 /* Don't count olds caused by ACK reordering */
1479 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1480 !after(sp[used_sacks].end_seq, tp->snd_una))
1481 continue;
1471 NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
1482 mib_idx = LINUX_MIB_TCPSACKDISCARD;
1472 }
1483 }
1484
1485 NET_INC_STATS_BH(sock_net(sk), mib_idx);
1473 if (i == 0)
1474 first_sack_index = -1;
1475 continue;
1476 }
1477
1478 /* Ignore very old stuff early */
1479 if (!after(sp[used_sacks].end_seq, prior_snd_una))
1480 continue;

--- 476 unchanged lines hidden (view full) ---

1957 * receiver _host_ is heavily congested (or buggy).
1958 *
1959 * Do processing similar to RTO timeout.
1960 */
1961static int tcp_check_sack_reneging(struct sock *sk, int flag)
1962{
1963 if (flag & FLAG_SACK_RENEGING) {
1964 struct inet_connection_sock *icsk = inet_csk(sk);
1486 if (i == 0)
1487 first_sack_index = -1;
1488 continue;
1489 }
1490
1491 /* Ignore very old stuff early */
1492 if (!after(sp[used_sacks].end_seq, prior_snd_una))
1493 continue;

--- 476 unchanged lines hidden (view full) ---

1970 * receiver _host_ is heavily congested (or buggy).
1971 *
1972 * Do processing similar to RTO timeout.
1973 */
1974static int tcp_check_sack_reneging(struct sock *sk, int flag)
1975{
1976 if (flag & FLAG_SACK_RENEGING) {
1977 struct inet_connection_sock *icsk = inet_csk(sk);
1965 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
1978 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
1966
1967 tcp_enter_loss(sk, 1);
1968 icsk->icsk_retransmits++;
1969 tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
1970 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1971 icsk->icsk_rto, TCP_RTO_MAX);
1972 return 1;
1973 }

--- 403 unchanged lines hidden (view full) ---

2377}
2378
2379/* People celebrate: "We love our President!" */
2380static int tcp_try_undo_recovery(struct sock *sk)
2381{
2382 struct tcp_sock *tp = tcp_sk(sk);
2383
2384 if (tcp_may_undo(tp)) {
1979
1980 tcp_enter_loss(sk, 1);
1981 icsk->icsk_retransmits++;
1982 tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
1983 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1984 icsk->icsk_rto, TCP_RTO_MAX);
1985 return 1;
1986 }

--- 403 unchanged lines hidden (view full) ---

2390}
2391
2392/* People celebrate: "We love our President!" */
2393static int tcp_try_undo_recovery(struct sock *sk)
2394{
2395 struct tcp_sock *tp = tcp_sk(sk);
2396
2397 if (tcp_may_undo(tp)) {
2398 int mib_idx;
2399
2385 /* Happy end! We did not retransmit anything
2386 * or our original transmission succeeded.
2387 */
2388 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2389 tcp_undo_cwr(sk, 1);
2390 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2400 /* Happy end! We did not retransmit anything
2401 * or our original transmission succeeded.
2402 */
2403 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2404 tcp_undo_cwr(sk, 1);
2405 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2391 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
2406 mib_idx = LINUX_MIB_TCPLOSSUNDO;
2392 else
2407 else
2393 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
2408 mib_idx = LINUX_MIB_TCPFULLUNDO;
2409
2410 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2394 tp->undo_marker = 0;
2395 }
2396 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2397 /* Hold old state until something *above* high_seq
2398 * is ACKed. For Reno it is MUST to prevent false
2399 * fast retransmits (RFC2582). SACK TCP is safe. */
2400 tcp_moderate_cwnd(tp);
2401 return 1;

--- 6 unchanged lines hidden (view full) ---

2408static void tcp_try_undo_dsack(struct sock *sk)
2409{
2410 struct tcp_sock *tp = tcp_sk(sk);
2411
2412 if (tp->undo_marker && !tp->undo_retrans) {
2413 DBGUNDO(sk, "D-SACK");
2414 tcp_undo_cwr(sk, 1);
2415 tp->undo_marker = 0;
2411 tp->undo_marker = 0;
2412 }
2413 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2414 /* Hold old state until something *above* high_seq
2415 * is ACKed. For Reno it is MUST to prevent false
2416 * fast retransmits (RFC2582). SACK TCP is safe. */
2417 tcp_moderate_cwnd(tp);
2418 return 1;

--- 6 unchanged lines hidden (view full) ---

2425static void tcp_try_undo_dsack(struct sock *sk)
2426{
2427 struct tcp_sock *tp = tcp_sk(sk);
2428
2429 if (tp->undo_marker && !tp->undo_retrans) {
2430 DBGUNDO(sk, "D-SACK");
2431 tcp_undo_cwr(sk, 1);
2432 tp->undo_marker = 0;
2416 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
2433 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2417 }
2418}
2419
2420/* Undo during fast recovery after partial ACK. */
2421
2422static int tcp_try_undo_partial(struct sock *sk, int acked)
2423{
2424 struct tcp_sock *tp = tcp_sk(sk);

--- 6 unchanged lines hidden (view full) ---

2431 */
2432 if (tp->retrans_out == 0)
2433 tp->retrans_stamp = 0;
2434
2435 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
2436
2437 DBGUNDO(sk, "Hoe");
2438 tcp_undo_cwr(sk, 0);
2434 }
2435}
2436
2437/* Undo during fast recovery after partial ACK. */
2438
2439static int tcp_try_undo_partial(struct sock *sk, int acked)
2440{
2441 struct tcp_sock *tp = tcp_sk(sk);

--- 6 unchanged lines hidden (view full) ---

2448 */
2449 if (tp->retrans_out == 0)
2450 tp->retrans_stamp = 0;
2451
2452 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
2453
2454 DBGUNDO(sk, "Hoe");
2455 tcp_undo_cwr(sk, 0);
2439 NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
2456 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
2440
2441 /* So... Do not make Hoe's retransmit yet.
2442 * If the first packet was delayed, the rest
2443 * ones are most probably delayed as well.
2444 */
2445 failed = 0;
2446 }
2447 return failed;

--- 12 unchanged lines hidden (view full) ---

2460 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2461 }
2462
2463 tcp_clear_all_retrans_hints(tp);
2464
2465 DBGUNDO(sk, "partial loss");
2466 tp->lost_out = 0;
2467 tcp_undo_cwr(sk, 1);
2457
2458 /* So... Do not make Hoe's retransmit yet.
2459 * If the first packet was delayed, the rest
2460 * ones are most probably delayed as well.
2461 */
2462 failed = 0;
2463 }
2464 return failed;

--- 12 unchanged lines hidden (view full) ---

2477 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2478 }
2479
2480 tcp_clear_all_retrans_hints(tp);
2481
2482 DBGUNDO(sk, "partial loss");
2483 tp->lost_out = 0;
2484 tcp_undo_cwr(sk, 1);
2468 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
2485 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2469 inet_csk(sk)->icsk_retransmits = 0;
2470 tp->undo_marker = 0;
2471 if (tcp_is_sack(tp))
2472 tcp_set_ca_state(sk, TCP_CA_Open);
2473 return 1;
2474 }
2475 return 0;
2476}

--- 80 unchanged lines hidden (view full) ---

2557 */
2558static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2559{
2560 struct inet_connection_sock *icsk = inet_csk(sk);
2561 struct tcp_sock *tp = tcp_sk(sk);
2562 int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
2563 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2564 (tcp_fackets_out(tp) > tp->reordering));
2486 inet_csk(sk)->icsk_retransmits = 0;
2487 tp->undo_marker = 0;
2488 if (tcp_is_sack(tp))
2489 tcp_set_ca_state(sk, TCP_CA_Open);
2490 return 1;
2491 }
2492 return 0;
2493}

--- 80 unchanged lines hidden (view full) ---

2574 */
2575static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2576{
2577 struct inet_connection_sock *icsk = inet_csk(sk);
2578 struct tcp_sock *tp = tcp_sk(sk);
2579 int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
2580 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2581 (tcp_fackets_out(tp) > tp->reordering));
2565 int fast_rexmit = 0;
2582 int fast_rexmit = 0, mib_idx;
2566
2567 if (WARN_ON(!tp->packets_out && tp->sacked_out))
2568 tp->sacked_out = 0;
2569 if (WARN_ON(!tp->sacked_out && tp->fackets_out))
2570 tp->fackets_out = 0;
2571
2572 /* Now state machine starts.
2573 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */

--- 5 unchanged lines hidden (view full) ---

2579 return;
2580
2581 /* C. Process data loss notification, provided it is valid. */
2582 if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
2583 before(tp->snd_una, tp->high_seq) &&
2584 icsk->icsk_ca_state != TCP_CA_Open &&
2585 tp->fackets_out > tp->reordering) {
2586 tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
2583
2584 if (WARN_ON(!tp->packets_out && tp->sacked_out))
2585 tp->sacked_out = 0;
2586 if (WARN_ON(!tp->sacked_out && tp->fackets_out))
2587 tp->fackets_out = 0;
2588
2589 /* Now state machine starts.
2590 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */

--- 5 unchanged lines hidden (view full) ---

2596 return;
2597
2598 /* C. Process data loss notification, provided it is valid. */
2599 if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
2600 before(tp->snd_una, tp->high_seq) &&
2601 icsk->icsk_ca_state != TCP_CA_Open &&
2602 tp->fackets_out > tp->reordering) {
2603 tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
2587 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
2604 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
2588 }
2589
2590 /* D. Check consistency of the current state. */
2591 tcp_verify_left_out(tp);
2592
2593 /* E. Check state exit conditions. State can be terminated
2594 * when high_seq is ACKed. */
2595 if (icsk->icsk_ca_state == TCP_CA_Open) {

--- 84 unchanged lines hidden (view full) ---

2680 tp->snd_cwnd++;
2681 tcp_simple_retransmit(sk);
2682 return;
2683 }
2684
2685 /* Otherwise enter Recovery state */
2686
2687 if (tcp_is_reno(tp))
2605 }
2606
2607 /* D. Check consistency of the current state. */
2608 tcp_verify_left_out(tp);
2609
2610 /* E. Check state exit conditions. State can be terminated
2611 * when high_seq is ACKed. */
2612 if (icsk->icsk_ca_state == TCP_CA_Open) {

--- 84 unchanged lines hidden (view full) ---

2697 tp->snd_cwnd++;
2698 tcp_simple_retransmit(sk);
2699 return;
2700 }
2701
2702 /* Otherwise enter Recovery state */
2703
2704 if (tcp_is_reno(tp))
2688 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
2705 mib_idx = LINUX_MIB_TCPRENORECOVERY;
2689 else
2706 else
2690 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
2707 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2691
2708
2709 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2710
2692 tp->high_seq = tp->snd_nxt;
2693 tp->prior_ssthresh = 0;
2694 tp->undo_marker = tp->snd_una;
2695 tp->undo_retrans = tp->retrans_out;
2696
2697 if (icsk->icsk_ca_state < TCP_CA_CWR) {
2698 if (!(flag & FLAG_ECE))
2699 tp->prior_ssthresh = tcp_current_ssthresh(sk);

--- 493 unchanged lines hidden (view full) ---

3193 tcp_conservative_spur_to_response(tp);
3194 break;
3195 default:
3196 tcp_ratehalving_spur_to_response(sk);
3197 break;
3198 }
3199 tp->frto_counter = 0;
3200 tp->undo_marker = 0;
2711 tp->high_seq = tp->snd_nxt;
2712 tp->prior_ssthresh = 0;
2713 tp->undo_marker = tp->snd_una;
2714 tp->undo_retrans = tp->retrans_out;
2715
2716 if (icsk->icsk_ca_state < TCP_CA_CWR) {
2717 if (!(flag & FLAG_ECE))
2718 tp->prior_ssthresh = tcp_current_ssthresh(sk);

--- 493 unchanged lines hidden (view full) ---

3212 tcp_conservative_spur_to_response(tp);
3213 break;
3214 default:
3215 tcp_ratehalving_spur_to_response(sk);
3216 break;
3217 }
3218 tp->frto_counter = 0;
3219 tp->undo_marker = 0;
3201 NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
3220 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
3202 }
3203 return 0;
3204}
3205
3206/* This routine deals with incoming acks, but not outgoing ones. */
3207static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3208{
3209 struct inet_connection_sock *icsk = inet_csk(sk);

--- 36 unchanged lines hidden (view full) ---

3246 * Note, we use the fact that SND.UNA>=SND.WL2.
3247 */
3248 tcp_update_wl(tp, ack, ack_seq);
3249 tp->snd_una = ack;
3250 flag |= FLAG_WIN_UPDATE;
3251
3252 tcp_ca_event(sk, CA_EVENT_FAST_ACK);
3253
3221 }
3222 return 0;
3223}
3224
3225/* This routine deals with incoming acks, but not outgoing ones. */
3226static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3227{
3228 struct inet_connection_sock *icsk = inet_csk(sk);

--- 36 unchanged lines hidden (view full) ---

3265 * Note, we use the fact that SND.UNA>=SND.WL2.
3266 */
3267 tcp_update_wl(tp, ack, ack_seq);
3268 tp->snd_una = ack;
3269 flag |= FLAG_WIN_UPDATE;
3270
3271 tcp_ca_event(sk, CA_EVENT_FAST_ACK);
3272
3254 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
3273 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
3255 } else {
3256 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3257 flag |= FLAG_DATA;
3258 else
3274 } else {
3275 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3276 flag |= FLAG_DATA;
3277 else
3259 NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
3278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
3260
3261 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3262
3263 if (TCP_SKB_CB(skb)->sacked)
3264 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3265
3266 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
3267 flag |= FLAG_ECE;

--- 177 unchanged lines hidden (view full) ---

3445 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3446 return 1;
3447 }
3448 }
3449 tcp_parse_options(skb, &tp->rx_opt, 1);
3450 return 1;
3451}
3452
3279
3280 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3281
3282 if (TCP_SKB_CB(skb)->sacked)
3283 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3284
3285 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
3286 flag |= FLAG_ECE;

--- 177 unchanged lines hidden (view full) ---

3464 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3465 return 1;
3466 }
3467 }
3468 tcp_parse_options(skb, &tp->rx_opt, 1);
3469 return 1;
3470}
3471
3472#ifdef CONFIG_TCP_MD5SIG
3473/*
3474 * Parse MD5 Signature option
3475 */
3476u8 *tcp_parse_md5sig_option(struct tcphdr *th)
3477{
3478 int length = (th->doff << 2) - sizeof (*th);
3479 u8 *ptr = (u8*)(th + 1);
3480
3481 /* If the TCP option is too short, we can short cut */
3482 if (length < TCPOLEN_MD5SIG)
3483 return NULL;
3484
3485 while (length > 0) {
3486 int opcode = *ptr++;
3487 int opsize;
3488
3489 switch(opcode) {
3490 case TCPOPT_EOL:
3491 return NULL;
3492 case TCPOPT_NOP:
3493 length--;
3494 continue;
3495 default:
3496 opsize = *ptr++;
3497 if (opsize < 2 || opsize > length)
3498 return NULL;
3499 if (opcode == TCPOPT_MD5SIG)
3500 return ptr;
3501 }
3502 ptr += opsize - 2;
3503 length -= opsize;
3504 }
3505 return NULL;
3506}
3507#endif
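tcp_parse_md5sig_option() above is new on this side of the diff: it walks the TCP options in the usual kind/length fashion (EOL ends the walk, NOP is a single byte, everything else is kind, length, data) and returns a pointer to the MD5 signature bytes, or NULL if the option is absent or malformed. A hedged sketch of how a caller might use it; the key lookup and digest comparison are assumptions for illustration, not code from this file:

	struct tcphdr *th = tcp_hdr(skb);
	u8 *hash_location = tcp_parse_md5sig_option(th);

	if (hash_location) {
		/* look up the key configured for this peer and compare the
		 * 16-byte digest at hash_location with a locally computed
		 * one; segments failing verification are dropped */
	}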
3508
3453static inline void tcp_store_ts_recent(struct tcp_sock *tp)
3454{
3455 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3456 tp->rx_opt.ts_recent_stamp = get_seconds();
3457}
3458
3459static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3460{

--- 196 unchanged lines hidden (view full) ---

3657 sp->start_seq = seq;
3658 if (after(end_seq, sp->end_seq))
3659 sp->end_seq = end_seq;
3660 return 1;
3661 }
3662 return 0;
3663}
3664
3509static inline void tcp_store_ts_recent(struct tcp_sock *tp)
3510{
3511 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3512 tp->rx_opt.ts_recent_stamp = get_seconds();
3513}
3514
3515static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3516{

--- 196 unchanged lines hidden (view full) ---

3713 sp->start_seq = seq;
3714 if (after(end_seq, sp->end_seq))
3715 sp->end_seq = end_seq;
3716 return 1;
3717 }
3718 return 0;
3719}
3720
3665static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
3721static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
3666{
3722{
3723 struct tcp_sock *tp = tcp_sk(sk);
3724
3667 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3725 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3726 int mib_idx;
3727
3668 if (before(seq, tp->rcv_nxt))
3728 if (before(seq, tp->rcv_nxt))
3669 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
3729 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
3670 else
3730 else
3671 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
3731 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
3672
3732
3733 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3734
3673 tp->rx_opt.dsack = 1;
3674 tp->duplicate_sack[0].start_seq = seq;
3675 tp->duplicate_sack[0].end_seq = end_seq;
3676 tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
3677 4 - tp->rx_opt.tstamp_ok);
3678 }
3679}
3680
3735 tp->rx_opt.dsack = 1;
3736 tp->duplicate_sack[0].start_seq = seq;
3737 tp->duplicate_sack[0].end_seq = end_seq;
3738 tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
3739 4 - tp->rx_opt.tstamp_ok);
3740 }
3741}
3742
3681static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
3743static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
3682{
3744{
3745 struct tcp_sock *tp = tcp_sk(sk);
3746
3683 if (!tp->rx_opt.dsack)
3747 if (!tp->rx_opt.dsack)
3684 tcp_dsack_set(tp, seq, end_seq);
3748 tcp_dsack_set(sk, seq, end_seq);
3685 else
3686 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
3687}
3688
3689static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
3690{
3691 struct tcp_sock *tp = tcp_sk(sk);
3692
3693 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
3694 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3749 else
3750 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
3751}
3752
3753static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
3754{
3755 struct tcp_sock *tp = tcp_sk(sk);
3756
3757 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
3758 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3695 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
3759 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
3696 tcp_enter_quickack_mode(sk);
3697
3698 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3699 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3700
3701 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
3702 end_seq = tp->rcv_nxt;
3760 tcp_enter_quickack_mode(sk);
3761
3762 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3763 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3764
3765 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
3766 end_seq = tp->rcv_nxt;
3703 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
3767 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
3704 }
3705 }
3706
3707 tcp_send_ack(sk);
3708}
3709
3710/* These routines update the SACK block as out-of-order packets arrive or
3711 * in-order packets close up the sequence space.

--- 136 unchanged lines hidden (view full) ---

3848 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
3849 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
3850 break;
3851
3852 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
3853 __u32 dsack = dsack_high;
3854 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
3855 dsack_high = TCP_SKB_CB(skb)->end_seq;
3768 }
3769 }
3770
3771 tcp_send_ack(sk);
3772}
3773
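As a concrete walk-through of the duplicate-segment path above (the numbers are hypothetical and assume SACK and sysctl_tcp_dsack are enabled): suppose rcv_nxt is 2000 and the peer retransmits an already-received segment covering 1000..1500. tcp_send_dupack() then does roughly the equivalent of:

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
	tcp_enter_quickack_mode(sk);	/* make the ACK go out immediately   */
	tcp_dsack_set(sk, 1000, 1500);	/* advertise 1000..1500 as a D-SACK  */
	tcp_send_ack(sk);		/* the ACK still acknowledges 2000   */

which lets the sender distinguish a spurious retransmission from genuine loss (RFC 2883).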
3774/* These routines update the SACK block as out-of-order packets arrive or
3775 * in-order packets close up the sequence space.

--- 136 unchanged lines hidden (view full) ---

3912 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
3913 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
3914 break;
3915
3916 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
3917 __u32 dsack = dsack_high;
3918 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
3919 dsack_high = TCP_SKB_CB(skb)->end_seq;
3856 tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
3920 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
3857 }
3858
3859 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3860 SOCK_DEBUG(sk, "ofo packet was already received \n");
3861 __skb_unlink(skb, &tp->out_of_order_queue);
3862 __kfree_skb(skb);
3863 continue;
3864 }

--- 111 unchanged lines hidden (view full) ---

3976 __kfree_skb(skb);
3977 else if (!sock_flag(sk, SOCK_DEAD))
3978 sk->sk_data_ready(sk, 0);
3979 return;
3980 }
3981
3982 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3983 /* A retransmit, 2nd most common case. Force an immediate ack. */
3921 }
3922
3923 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3924 SOCK_DEBUG(sk, "ofo packet was already received \n");
3925 __skb_unlink(skb, &tp->out_of_order_queue);
3926 __kfree_skb(skb);
3927 continue;
3928 }

--- 111 unchanged lines hidden (view full) ---

4040 __kfree_skb(skb);
4041 else if (!sock_flag(sk, SOCK_DEAD))
4042 sk->sk_data_ready(sk, 0);
4043 return;
4044 }
4045
4046 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4047 /* A retransmit, 2nd most common case. Force an immediate ack. */
3984 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
3985 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4048 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4049 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
3986
3987out_of_window:
3988 tcp_enter_quickack_mode(sk);
3989 inet_csk_schedule_ack(sk);
3990drop:
3991 __kfree_skb(skb);
3992 return;
3993 }

--- 5 unchanged lines hidden (view full) ---

3999 tcp_enter_quickack_mode(sk);
4000
4001 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4002 /* Partial packet, seq < rcv_next < end_seq */
4003 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
4004 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4005 TCP_SKB_CB(skb)->end_seq);
4006
4050
4051out_of_window:
4052 tcp_enter_quickack_mode(sk);
4053 inet_csk_schedule_ack(sk);
4054drop:
4055 __kfree_skb(skb);
4056 return;
4057 }

--- 5 unchanged lines hidden (view full) ---

4063 tcp_enter_quickack_mode(sk);
4064
4065 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4066 /* Partial packet, seq < rcv_next < end_seq */
4067 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
4068 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4069 TCP_SKB_CB(skb)->end_seq);
4070
4007 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
4071 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
4008
4009 /* If window is closed, drop tail of packet. But after
4010 * remembering D-SACK for its head made in previous line.
4011 */
4012 if (!tcp_receive_window(tp))
4013 goto out_of_window;
4014 goto queue_and_out;
4015 }

--- 48 unchanged lines hidden (view full) ---

4064 (struct sk_buff *)&tp->out_of_order_queue);
4065
4066 /* Do skb overlap to previous one? */
4067 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
4068 before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4069 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4070 /* All the bits are present. Drop. */
4071 __kfree_skb(skb);
4072
4073 /* If window is closed, drop tail of packet. But after
4074 * remembering D-SACK for its head made in previous line.
4075 */
4076 if (!tcp_receive_window(tp))
4077 goto out_of_window;
4078 goto queue_and_out;
4079 }

--- 48 unchanged lines hidden (view full) ---

4128 (struct sk_buff *)&tp->out_of_order_queue);
4129
4130 /* Do skb overlap to previous one? */
4131 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
4132 before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4133 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4134 /* All the bits are present. Drop. */
4135 __kfree_skb(skb);
4072 tcp_dsack_set(tp, seq, end_seq);
4136 tcp_dsack_set(sk, seq, end_seq);
4073 goto add_sack;
4074 }
4075 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4076 /* Partial overlap. */
4137 goto add_sack;
4138 }
4139 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4140 /* Partial overlap. */
4077 tcp_dsack_set(tp, seq,
4141 tcp_dsack_set(sk, seq,
4078 TCP_SKB_CB(skb1)->end_seq);
4079 } else {
4080 skb1 = skb1->prev;
4081 }
4082 }
4083 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
4084
4085 /* And clean segments covered by new one as whole. */
4086 while ((skb1 = skb->next) !=
4087 (struct sk_buff *)&tp->out_of_order_queue &&
4088 after(end_seq, TCP_SKB_CB(skb1)->seq)) {
4089 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4142 TCP_SKB_CB(skb1)->end_seq);
4143 } else {
4144 skb1 = skb1->prev;
4145 }
4146 }
4147 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
4148
4149 /* And clean segments covered by new one as whole. */
4150 while ((skb1 = skb->next) !=
4151 (struct sk_buff *)&tp->out_of_order_queue &&
4152 after(end_seq, TCP_SKB_CB(skb1)->seq)) {
4153 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4090 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
4154 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4091 end_seq);
4092 break;
4093 }
4094 __skb_unlink(skb1, &tp->out_of_order_queue);
4155 end_seq);
4156 break;
4157 }
4158 __skb_unlink(skb1, &tp->out_of_order_queue);
4095 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
4159 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4096 TCP_SKB_CB(skb1)->end_seq);
4097 __kfree_skb(skb1);
4098 }
4099
4100add_sack:
4101 if (tcp_is_sack(tp))
4102 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4103 }

--- 14 unchanged lines hidden (view full) ---

4118 /* First, check that queue is collapsible and find
4119 * the point where collapsing can be useful. */
4120 for (skb = head; skb != tail;) {
4121 /* No new bits? It is possible on ofo queue. */
4122 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4123 struct sk_buff *next = skb->next;
4124 __skb_unlink(skb, list);
4125 __kfree_skb(skb);
4160 TCP_SKB_CB(skb1)->end_seq);
4161 __kfree_skb(skb1);
4162 }
4163
4164add_sack:
4165 if (tcp_is_sack(tp))
4166 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4167 }

--- 14 unchanged lines hidden (view full) ---

4182 /* First, check that queue is collapsible and find
4183 * the point where collapsing can be useful. */
4184 for (skb = head; skb != tail;) {
4185 /* No new bits? It is possible on ofo queue. */
4186 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4187 struct sk_buff *next = skb->next;
4188 __skb_unlink(skb, list);
4189 __kfree_skb(skb);
4126 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
4190 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4127 skb = next;
4128 continue;
4129 }
4130
4131 /* The first skb to collapse is:
4132 * - not SYN/FIN and
4133 * - bloated or contains data before "start" or
4134 * overlaps to the next one.

--- 51 unchanged lines hidden (view full) ---

4186 TCP_SKB_CB(nskb)->end_seq += size;
4187 copy -= size;
4188 start += size;
4189 }
4190 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4191 struct sk_buff *next = skb->next;
4192 __skb_unlink(skb, list);
4193 __kfree_skb(skb);
4191 skb = next;
4192 continue;
4193 }
4194
4195 /* The first skb to collapse is:
4196 * - not SYN/FIN and
4197 * - bloated or contains data before "start" or
4198 * overlaps to the next one.

--- 51 unchanged lines hidden (view full) ---

4250 TCP_SKB_CB(nskb)->end_seq += size;
4251 copy -= size;
4252 start += size;
4253 }
4254 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4255 struct sk_buff *next = skb->next;
4256 __skb_unlink(skb, list);
4257 __kfree_skb(skb);
4194 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
4258 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4195 skb = next;
4196 if (skb == tail ||
4197 tcp_hdr(skb)->syn ||
4198 tcp_hdr(skb)->fin)
4199 return;
4200 }
4201 }
4202 }

--- 46 unchanged lines hidden (view full) ---

4249 * Return true if queue was pruned.
4250 */
4251static int tcp_prune_ofo_queue(struct sock *sk)
4252{
4253 struct tcp_sock *tp = tcp_sk(sk);
4254 int res = 0;
4255
4256 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4259 skb = next;
4260 if (skb == tail ||
4261 tcp_hdr(skb)->syn ||
4262 tcp_hdr(skb)->fin)
4263 return;
4264 }
4265 }
4266 }

--- 46 unchanged lines hidden (view full) ---

4313 * Return true if queue was pruned.
4314 */
4315static int tcp_prune_ofo_queue(struct sock *sk)
4316{
4317 struct tcp_sock *tp = tcp_sk(sk);
4318 int res = 0;
4319
4320 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4257 NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
4321 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
4258 __skb_queue_purge(&tp->out_of_order_queue);
4259
4260 /* Reset SACK state. A conforming SACK implementation will
4261 * do the same at a timeout based retransmit. When a connection
4262 * is in a sad state like this, we care only about integrity
4263 * of the connection not performance.
4264 */
4265 if (tp->rx_opt.sack_ok)

--- 12 unchanged lines hidden (view full) ---

4278 * to stabilize the situation.
4279 */
4280static int tcp_prune_queue(struct sock *sk)
4281{
4282 struct tcp_sock *tp = tcp_sk(sk);
4283
4284 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
4285
4322 __skb_queue_purge(&tp->out_of_order_queue);
4323
4324 /* Reset SACK state. A conforming SACK implementation will
4325 * do the same at a timeout based retransmit. When a connection
4326 * is in a sad state like this, we care only about integrity
4327 * of the connection not performance.
4328 */
4329 if (tp->rx_opt.sack_ok)

--- 12 unchanged lines hidden (view full) ---

4342 * to stabilize the situation.
4343 */
4344static int tcp_prune_queue(struct sock *sk)
4345{
4346 struct tcp_sock *tp = tcp_sk(sk);
4347
4348 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
4349
4286 NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
4350 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
4287
4288 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
4289 tcp_clamp_window(sk);
4290 else if (tcp_memory_pressure)
4291 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4292
4293 tcp_collapse_ofo_queue(sk);
4294 tcp_collapse(sk, &sk->sk_receive_queue,

--- 12 unchanged lines hidden (view full) ---

4307
4308 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4309 return 0;
4310
4311 /* If we are really being abused, tell the caller to silently
4312 * drop receive data on the floor. It will get retransmitted
4313 * and hopefully then we'll have sufficient space.
4314 */
4351
4352 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
4353 tcp_clamp_window(sk);
4354 else if (tcp_memory_pressure)
4355 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4356
4357 tcp_collapse_ofo_queue(sk);
4358 tcp_collapse(sk, &sk->sk_receive_queue,

--- 12 unchanged lines hidden (view full) ---

4371
4372 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4373 return 0;
4374
4375 /* If we are really being abused, tell the caller to silently
4376 * drop receive data on the floor. It will get retransmitted
4377 * and hopefully then we'll have sufficient space.
4378 */
4315 NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
4379 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
4316
4317 /* Massive buffer overcommit. */
4318 tp->pred_flags = 0;
4319 return -1;
4320}
4321
4322/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
4323 * As additional protections, we do not touch cwnd in retransmission phases,

--- 413 unchanged lines hidden (view full) ---

4737 /* We know that such packets are checksummed
4738 * on entry.
4739 */
4740 tcp_ack(sk, skb, 0);
4741 __kfree_skb(skb);
4742 tcp_data_snd_check(sk);
4743 return 0;
4744 } else { /* Header too small */
4380
4381 /* Massive buffer overcommit. */
4382 tp->pred_flags = 0;
4383 return -1;
4384}
4385
4386/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
4387 * As additional protections, we do not touch cwnd in retransmission phases,

--- 413 unchanged lines hidden (view full) ---

4801 /* We know that such packets are checksummed
4802 * on entry.
4803 */
4804 tcp_ack(sk, skb, 0);
4805 __kfree_skb(skb);
4806 tcp_data_snd_check(sk);
4807 return 0;
4808 } else { /* Header too small */
4745 TCP_INC_STATS_BH(TCP_MIB_INERRS);
4809 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4746 goto discard;
4747 }
4748 } else {
4749 int eaten = 0;
4750 int copied_early = 0;
4751
4752 if (tp->copied_seq == tp->rcv_nxt &&
4753 len - tcp_header_len <= tp->ucopy.len) {

--- 20 unchanged lines hidden (view full) ---

4774 TCPOLEN_TSTAMP_ALIGNED) &&
4775 tp->rcv_nxt == tp->rcv_wup)
4776 tcp_store_ts_recent(tp);
4777
4778 tcp_rcv_rtt_measure_ts(sk, skb);
4779
4780 __skb_pull(skb, tcp_header_len);
4781 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4810 goto discard;
4811 }
4812 } else {
4813 int eaten = 0;
4814 int copied_early = 0;
4815
4816 if (tp->copied_seq == tp->rcv_nxt &&
4817 len - tcp_header_len <= tp->ucopy.len) {

--- 20 unchanged lines hidden (view full) ---

4838 TCPOLEN_TSTAMP_ALIGNED) &&
4839 tp->rcv_nxt == tp->rcv_wup)
4840 tcp_store_ts_recent(tp);
4841
4842 tcp_rcv_rtt_measure_ts(sk, skb);
4843
4844 __skb_pull(skb, tcp_header_len);
4845 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4782 NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
4846 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
4783 }
4784 if (copied_early)
4785 tcp_cleanup_rbuf(sk, skb->len);
4786 }
4787 if (!eaten) {
4788 if (tcp_checksum_complete_user(sk, skb))
4789 goto csum_error;
4790

--- 6 unchanged lines hidden (view full) ---

4797 tp->rcv_nxt == tp->rcv_wup)
4798 tcp_store_ts_recent(tp);
4799
4800 tcp_rcv_rtt_measure_ts(sk, skb);
4801
4802 if ((int)skb->truesize > sk->sk_forward_alloc)
4803 goto step5;
4804
4847 }
4848 if (copied_early)
4849 tcp_cleanup_rbuf(sk, skb->len);
4850 }
4851 if (!eaten) {
4852 if (tcp_checksum_complete_user(sk, skb))
4853 goto csum_error;
4854

--- 6 unchanged lines hidden (view full) ---

4861 tp->rcv_nxt == tp->rcv_wup)
4862 tcp_store_ts_recent(tp);
4863
4864 tcp_rcv_rtt_measure_ts(sk, skb);
4865
4866 if ((int)skb->truesize > sk->sk_forward_alloc)
4867 goto step5;
4868
4805 NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
4869 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
4806
4807 /* Bulk data transfer: receiver */
4808 __skb_pull(skb, tcp_header_len);
4809 __skb_queue_tail(&sk->sk_receive_queue, skb);
4810 skb_set_owner_r(skb, sk);
4811 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4812 }
4813

--- 27 unchanged lines hidden (view full) ---

4841 goto csum_error;
4842
4843 /*
4844 * RFC1323: H1. Apply PAWS check first.
4845 */
4846 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4847 tcp_paws_discard(sk, skb)) {
4848 if (!th->rst) {
4870
4871 /* Bulk data transfer: receiver */
4872 __skb_pull(skb, tcp_header_len);
4873 __skb_queue_tail(&sk->sk_receive_queue, skb);
4874 skb_set_owner_r(skb, sk);
4875 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4876 }
4877

--- 27 unchanged lines hidden (view full) ---

4905 goto csum_error;
4906
4907 /*
4908 * RFC1323: H1. Apply PAWS check first.
4909 */
4910 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4911 tcp_paws_discard(sk, skb)) {
4912 if (!th->rst) {
4849 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
4913 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4850 tcp_send_dupack(sk, skb);
4851 goto discard;
4852 }
4853 /* Resets are accepted even if PAWS failed.
4854
4855 ts_recent update must be made after we are sure
4856 that the packet is in window.
4857 */

--- 18 unchanged lines hidden (view full) ---

4876 if (th->rst) {
4877 tcp_reset(sk);
4878 goto discard;
4879 }
4880
4881 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4882
4883 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4914 tcp_send_dupack(sk, skb);
4915 goto discard;
4916 }
4917 /* Resets are accepted even if PAWS failed.
4918
4919 ts_recent update must be made after we are sure
4920 that the packet is in window.
4921 */

--- 18 unchanged lines hidden (view full) ---

4940 if (th->rst) {
4941 tcp_reset(sk);
4942 goto discard;
4943 }
4944
4945 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4946
4947 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4884 TCP_INC_STATS_BH(TCP_MIB_INERRS);
4885 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
4948 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4949 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4886 tcp_reset(sk);
4887 return 1;
4888 }
4889
4890step5:
4891 if (th->ack)
4892 tcp_ack(sk, skb, FLAG_SLOWPATH);
4893

--- 5 unchanged lines hidden (view full) ---

4899 /* step 7: process the segment text */
4900 tcp_data_queue(sk, skb);
4901
4902 tcp_data_snd_check(sk);
4903 tcp_ack_snd_check(sk);
4904 return 0;
4905
4906csum_error:
4950 tcp_reset(sk);
4951 return 1;
4952 }
4953
4954step5:
4955 if (th->ack)
4956 tcp_ack(sk, skb, FLAG_SLOWPATH);
4957

--- 5 unchanged lines hidden (view full) ---

4963 /* step 7: process the segment text */
4964 tcp_data_queue(sk, skb);
4965
4966 tcp_data_snd_check(sk);
4967 tcp_ack_snd_check(sk);
4968 return 0;
4969
4970csum_error:
4907 TCP_INC_STATS_BH(TCP_MIB_INERRS);
4971 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4908
4909discard:
4910 __kfree_skb(skb);
4911 return 0;
4912}
4913
4914static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
4915 struct tcphdr *th, unsigned len)

--- 17 unchanged lines hidden (view full) ---

4933 * test reduces to:
4934 */
4935 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
4936 goto reset_and_undo;
4937
4938 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4939 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
4940 tcp_time_stamp)) {
4972
4973discard:
4974 __kfree_skb(skb);
4975 return 0;
4976}
4977
4978static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
4979 struct tcphdr *th, unsigned len)

--- 17 unchanged lines hidden (view full) ---

4997 * test reduces to:
4998 */
4999 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
5000 goto reset_and_undo;
5001
5002 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
5003 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
5004 tcp_time_stamp)) {
4941 NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
5005 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
4942 goto reset_and_undo;
4943 }
4944
4945 /* Now ACK is acceptable.
4946 *
4947 * "If the RST bit is set
4948 * If the ACK was acceptable then signal the user "error:
4949 * connection reset", drop the segment, enter CLOSED state,

--- 267 unchanged lines hidden (view full) ---

5217 __kfree_skb(skb);
5218 tcp_data_snd_check(sk);
5219 return 0;
5220 }
5221
5222 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
5223 tcp_paws_discard(sk, skb)) {
5224 if (!th->rst) {
5006 goto reset_and_undo;
5007 }
5008
5009 /* Now ACK is acceptable.
5010 *
5011 * "If the RST bit is set
5012 * If the ACK was acceptable then signal the user "error:
5013 * connection reset", drop the segment, enter CLOSED state,

--- 267 unchanged lines hidden (view full) ---

5281 __kfree_skb(skb);
5282 tcp_data_snd_check(sk);
5283 return 0;
5284 }
5285
5286 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
5287 tcp_paws_discard(sk, skb)) {
5288 if (!th->rst) {
5225 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
5289 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5226 tcp_send_dupack(sk, skb);
5227 goto discard;
5228 }
5229 /* Reset is accepted even if it did not pass PAWS. */
5230 }
5231
5232 /* step 1: check sequence number */
5233 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {

--- 12 unchanged lines hidden (view full) ---

5246
5247 /* step 3: check security and precedence [ignored] */
5248
5249 /* step 4:
5250 *
5251 * Check for a SYN in window.
5252 */
5253 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5290 tcp_send_dupack(sk, skb);
5291 goto discard;
5292 }
5293 /* Reset is accepted even if it did not pass PAWS. */
5294 }
5295
5296 /* step 1: check sequence number */
5297 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {

--- 12 unchanged lines hidden (view full) ---

5310
5311 /* step 3: check security and precedence [ignored] */
5312
5313 /* step 4:
5314 *
5315 * Check for a SYN in window.
5316 */
5317 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5254 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
5318 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5255 tcp_reset(sk);
5256 return 1;
5257 }
5258
5259 /* step 5: check the ACK field */
5260 if (th->ack) {
5261 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
5262

--- 65 unchanged lines hidden (view full) ---

5328 sk->sk_state_change(sk);
5329 else {
5330 int tmo;
5331
5332 if (tp->linger2 < 0 ||
5333 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5334 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
5335 tcp_done(sk);
5319 tcp_reset(sk);
5320 return 1;
5321 }
5322
5323 /* step 5: check the ACK field */
5324 if (th->ack) {
5325 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
5326

--- 65 unchanged lines hidden (view full) ---

5392 sk->sk_state_change(sk);
5393 else {
5394 int tmo;
5395
5396 if (tp->linger2 < 0 ||
5397 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5398 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
5399 tcp_done(sk);
5336 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
5400 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5337 return 1;
5338 }
5339
5340 tmo = tcp_fin_time(sk);
5341 if (tmo > TCP_TIMEWAIT_LEN) {
5342 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
5343 } else if (th->fin || sock_owned_by_user(sk)) {
5344 /* Bad case. We could lose such FIN otherwise.

--- 43 unchanged lines hidden (view full) ---

5388 case TCP_FIN_WAIT2:
5389 /* RFC 793 says to queue data in these states,
5390 * RFC 1122 says we MUST send a reset.
5391 * BSD 4.4 also does reset.
5392 */
5393 if (sk->sk_shutdown & RCV_SHUTDOWN) {
5394 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5395 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
5401 return 1;
5402 }
5403
5404 tmo = tcp_fin_time(sk);
5405 if (tmo > TCP_TIMEWAIT_LEN) {
5406 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
5407 } else if (th->fin || sock_owned_by_user(sk)) {
5408 /* Bad case. We could lose such FIN otherwise.

--- 43 unchanged lines hidden (view full) ---

5452 case TCP_FIN_WAIT2:
5453 /* RFC 793 says to queue data in these states,
5454 * RFC 1122 says we MUST send a reset.
5455 * BSD 4.4 also does reset.
5456 */
5457 if (sk->sk_shutdown & RCV_SHUTDOWN) {
5458 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5459 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
5396 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
5460 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5397 tcp_reset(sk);
5398 return 1;
5399 }
5400 }
5401 /* Fall through */
5402 case TCP_ESTABLISHED:
5403 tcp_data_queue(sk, skb);
5404 queued = 1;

--- 12 unchanged lines hidden (view full) ---

5417 }
5418 return 0;
5419}
5420
5421EXPORT_SYMBOL(sysctl_tcp_ecn);
5422EXPORT_SYMBOL(sysctl_tcp_reordering);
5423EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5424EXPORT_SYMBOL(tcp_parse_options);
5461 tcp_reset(sk);
5462 return 1;
5463 }
5464 }
5465 /* Fall through */
5466 case TCP_ESTABLISHED:
5467 tcp_data_queue(sk, skb);
5468 queued = 1;

--- 12 unchanged lines hidden (view full) ---

5481 }
5482 return 0;
5483}
5484
5485EXPORT_SYMBOL(sysctl_tcp_ecn);
5486EXPORT_SYMBOL(sysctl_tcp_reordering);
5487EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5488EXPORT_SYMBOL(tcp_parse_options);
5489#ifdef CONFIG_TCP_MD5SIG
5490EXPORT_SYMBOL(tcp_parse_md5sig_option);
5491#endif
5425EXPORT_SYMBOL(tcp_rcv_established);
5426EXPORT_SYMBOL(tcp_rcv_state_process);
5427EXPORT_SYMBOL(tcp_initialize_rcv_mss);
5492EXPORT_SYMBOL(tcp_rcv_established);
5493EXPORT_SYMBOL(tcp_rcv_state_process);
5494EXPORT_SYMBOL(tcp_initialize_rcv_mss);