tcp_input.c: diff from af9cc93c0dee5fc1f9fa32cd9d79a456738a21be (old, lines marked '-') to 90bbcc608369a1b46089b0f5aa22b8ea31ffa12e (new, lines marked '+'); unmarked lines are common context, shown with their line numbers in the old file.
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Authors: Ross Biro

--- 2239 unchanged lines hidden ---

2248 int sacked_upto = tp->sacked_out - tp->reordering;
2249 if (sacked_upto >= 0)
2250 tcp_mark_head_lost(sk, sacked_upto, 0);
2251 else if (fast_rexmit)
2252 tcp_mark_head_lost(sk, 1, 1);
2253 }
2254}
2255
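For the hunk above: once SACKed segments exceed the assumed reordering degree, tcp_mark_head_lost() marks the oldest outstanding segments lost. A quick worked example, assuming the usual meaning of the two fields: with tp->sacked_out = 5 and tp->reordering = 3, sacked_upto = 2 and the two segments at the head of the write queue are marked lost; while sacked_out is still below reordering, only a fast retransmit (fast_rexmit) marks the single head segment.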
-2256/* CWND moderation, preventing bursts due to too big ACKs
-2257 * in dubious situations.
-2258 */
-2259static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
-2260{
-2261 tp->snd_cwnd = min(tp->snd_cwnd,
-2262 tcp_packets_in_flight(tp) + tcp_max_burst(tp));
-2263 tp->snd_cwnd_stamp = tcp_time_stamp;
-2264}
-2265
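tcp_moderate_cwnd(), deleted above together with its last caller (the '-2414' line below), clamped snd_cwnd to the number of packets in flight plus a small burst allowance; e.g. with 10 packets in flight and a burst allowance of 3, snd_cwnd would be capped at 13. With the removal, the undo path below no longer moderates cwnd.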
2266static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2267{
2268 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2269 before(tp->rx_opt.rcv_tsecr, when);
2270}
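tcp_tsopt_ecr_before() above relies on the wrap-safe 32-bit comparison helper before(); for reference, a sketch of its shape per include/net/tcp.h:

	static inline bool before(__u32 seq1, __u32 seq2)
	{
		/* signed subtraction handles 32-bit wraparound */
		return (__s32)(seq1 - seq2) < 0;
	}

With it, an echoed timestamp (rcv_tsecr) strictly earlier than the transmit time 'when' identifies the retransmission as spurious.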
2271
2272/* skb is spurious retransmitted if the returned timestamp echo
2273 * reply is prior to the skb transmission time

--- 132 unchanged lines hidden ---

2406 mib_idx = LINUX_MIB_TCPFULLUNDO;
2407
2408 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2409 }
2410 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2411 /* Hold old state until something *above* high_seq
2412 * is ACKed. For Reno it is MUST to prevent false
2413 * fast retransmits (RFC2582). SACK TCP is safe. */
-2414 tcp_moderate_cwnd(tp);
2415 if (!tcp_any_retrans_done(sk))
2416 tp->retrans_stamp = 0;
2417 return true;
2418 }
2419 tcp_set_ca_state(sk, TCP_CA_Open);
2420 return false;
2421}
2422

--- 666 unchanged lines hidden ---

3089}
3090
3091static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3092 u32 prior_snd_una)
3093{
3094 const struct skb_shared_info *shinfo;
3095
3096 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
-3097 if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)))
+3086 if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3098 return;
3099
3100 shinfo = skb_shinfo(skb);
3101 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
3102 !before(shinfo->tskey, prior_snd_una) &&
3103 before(shinfo->tskey, tcp_sk(sk)->snd_una))
3104 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3105}
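In the changed test above, the old code consulted the per-socket sk->sk_tsflags on every ACK; the new code tests a bit cached in the skb's control block, matching the "avoid cache line misses" comment. A hedged sketch of the assumed transmit-side setter (the exact location is an assumption):

	/* at transmit time, remember that this skb wants an ACK timestamp */
	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
		TCP_SKB_CB(skb)->txstamp_ack = 1;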

--- 1209 unchanged lines hidden ---

4315 sk_mem_charge(sk, delta);
4316 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4317 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4318 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4319 TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
4320 return true;
4321}
4322
+4312static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+4313{
+4314 sk_drops_add(sk, skb);
+4315 __kfree_skb(skb);
+4316}
+4317
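tcp_drop() above becomes the central free for packets discarded on the receive path, accounting the drop in sk->sk_drops before freeing; the '+' lines throughout the rest of this diff convert the receive-path __kfree_skb() call sites to it. A minimal sketch of the companion helper, assuming the include/net/sock.h definition of this era (it counts GSO segments, not skbs):

	static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
	{
		/* a GSO skb can stand for several segments on the wire */
		int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

		atomic_add(segs, &sk->sk_drops);
	}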
4323/* This one checks to see if we can put data from the
4324 * out_of_order queue into the receive_queue.
4325 */
4326static void tcp_ofo_queue(struct sock *sk)
4327{
4328 struct tcp_sock *tp = tcp_sk(sk);
4329 __u32 dsack_high = tp->rcv_nxt;
4330 struct sk_buff *skb, *tail;

--- 8 unchanged lines hidden ---

4339 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
4340 dsack_high = TCP_SKB_CB(skb)->end_seq;
4341 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
4342 }
4343
4344 __skb_unlink(skb, &tp->out_of_order_queue);
4345 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4346 SOCK_DEBUG(sk, "ofo packet was already received\n");
-4347 __kfree_skb(skb);
+4342 tcp_drop(sk, skb);
4348 continue;
4349 }
4350 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
4351 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4352 TCP_SKB_CB(skb)->end_seq);
4353
4354 tail = skb_peek_tail(&sk->sk_receive_queue);
4355 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);

--- 35 unchanged lines hidden ---

4391 struct tcp_sock *tp = tcp_sk(sk);
4392 struct sk_buff *skb1;
4393 u32 seq, end_seq;
4394
4395 tcp_ecn_check_ce(tp, skb);
4396
4397 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
4398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
-4399 __kfree_skb(skb);
+4394 tcp_drop(sk, skb);
4400 return;
4401 }
4402
4403 /* Disable header prediction. */
4404 tp->pred_flags = 0;
4405 inet_csk_schedule_ack(sk);
4406
4407 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);

--- 47 unchanged lines hidden ---

4455 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4456 }
4457
4458 /* Do skb overlap to previous one? */
4459 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4460 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4461 /* All the bits are present. Drop. */
4462 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-4463 __kfree_skb(skb);
+4458 tcp_drop(sk, skb);
4464 skb = NULL;
4465 tcp_dsack_set(sk, seq, end_seq);
4466 goto add_sack;
4467 }
4468 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4469 /* Partial overlap. */
4470 tcp_dsack_set(sk, seq,
4471 TCP_SKB_CB(skb1)->end_seq);

--- 22 unchanged lines hidden ---

4494 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4495 end_seq);
4496 break;
4497 }
4498 __skb_unlink(skb1, &tp->out_of_order_queue);
4499 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4500 TCP_SKB_CB(skb1)->end_seq);
4501 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-4502 __kfree_skb(skb1);
+4497 tcp_drop(sk, skb1);
4503 }
4504
4505add_sack:
4506 if (tcp_is_sack(tp))
4507 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4508end:
4509 if (skb) {
4510 tcp_grow_window(sk, skb);

--- 66 unchanged lines hidden ---

4577err:
4578 return err;
4579
4580}
4581
4582static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4583{
4584 struct tcp_sock *tp = tcp_sk(sk);
-4585 int eaten = -1;
-4586 bool fragstolen = false;
+4580 bool fragstolen = false;
+4581 int eaten = -1;
4587
-4588 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
-4589 goto drop;
-4590
+4583 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+4584 __kfree_skb(skb);
+4585 return;
+4586 }
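Note the behavioral nuance in the '+' block above: a zero-length segment (seq == end_seq) is freed with plain __kfree_skb() and returns early instead of falling through to the shared drop: label, so discarding a dataless segment is not counted in sk->sk_drops.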
4591 skb_dst_drop(skb);
4592 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
4593
4594 tcp_ecn_accept_cwr(tp, skb);
4595
4596 tp->rx_opt.dsack = 0;
4597
4598 /* Queue data for delivery to the user.

--- 65 unchanged lines hidden ---

4664 /* A retransmit, 2nd most common case. Force an immediate ack. */
4665 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4666 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4667
4668out_of_window:
4669 tcp_enter_quickack_mode(sk);
4670 inet_csk_schedule_ack(sk);
4671drop:
-4672 __kfree_skb(skb);
+4668 tcp_drop(sk, skb);
4673 return;
4674 }
4675
4676 /* Out of window. F.e. zero window probe. */
4677 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
4678 goto out_of_window;
4679
4680 tcp_enter_quickack_mode(sk);

--- 551 unchanged lines hidden ---

5232 /* step 3: check security and precedence [ignored] */
5233
5234 /* step 4: Check for a SYN
5235 * RFC 5961 4.2 : Send a challenge ack
5236 */
5237 if (th->syn) {
5238syn_challenge:
5239 if (syn_inerr)
-5240 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+5236 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5241 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
5242 tcp_send_challenge_ack(sk, skb);
5243 goto discard;
5244 }
5245
5246 return true;
5247
5248discard:
-5249 __kfree_skb(skb);
+5245 tcp_drop(sk, skb);
5250 return false;
5251}
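Here and at the remaining TCP_MIB_* sites below, TCP_INC_STATS_BH() becomes __TCP_INC_STATS(); the counters are unchanged, only the macro naming. Assuming the convention of the wider SNMP-macro rename, the double-underscore form is the non-preempt-safe variant reserved for contexts (such as softirq) where protection is already in place:

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);	/* BH/softirq context */
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);	/* preemption-safe form */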
5252
5253/*
5254 * TCP receive function for the ESTABLISHED state.
5255 *
5256 * It is split into a fast path and a slow path. The fast path is
5257 * disabled when:

--- 90 unchanged lines hidden ---

5348 /* We know that such packets are checksummed
5349 * on entry.
5350 */
5351 tcp_ack(sk, skb, 0);
5352 __kfree_skb(skb);
5353 tcp_data_snd_check(sk);
5354 return;
5355 } else { /* Header too small */
-5356 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+5352 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5357 goto discard;
5358 }
5359 } else {
5360 int eaten = 0;
5361 bool fragstolen = false;
5362
5363 if (tp->ucopy.task == current &&
5364 tp->copied_seq == tp->rcv_nxt &&

--- 90 unchanged lines hidden ---

5455 /* step 7: process the segment text */
5456 tcp_data_queue(sk, skb);
5457
5458 tcp_data_snd_check(sk);
5459 tcp_ack_snd_check(sk);
5460 return;
5461
5462csum_error:
-5463 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-5464 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+5459 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+5460 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5465
5466discard:
-5467 __kfree_skb(skb);
+5463 tcp_drop(sk, skb);
5468}
5469EXPORT_SYMBOL(tcp_rcv_established);
5470
5471void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5472{
5473 struct tcp_sock *tp = tcp_sk(sk);
5474 struct inet_connection_sock *icsk = inet_csk(sk);
5475

--- 68 unchanged lines hidden ---

5544 try_exp = tp->syn_fastopen_exp ? 2 : 1;
5545 }
5546
5547 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
5548
5549 if (data) { /* Retransmit unacked data in SYN */
5550 tcp_for_write_queue_from(data, sk) {
5551 if (data == tcp_send_head(sk) ||
-5552 __tcp_retransmit_skb(sk, data))
+5548 __tcp_retransmit_skb(sk, data, 1))
5553 break;
5554 }
5555 tcp_rearm_rto(sk);
5556 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
5557 return true;
5558 }
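The extra '1' in the changed call above reflects a signature change: __tcp_retransmit_skb() is assumed to now take a segment count, so this Fast Open fallback retransmits one segment per skb. Assumed new prototype:

	int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);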
5559 tp->syn_data_acked = tp->syn_data;
5560 if (tp->syn_data_acked)

--- 129 unchanged lines hidden ---

5690 */
5691 inet_csk_schedule_ack(sk);
5692 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5693 tcp_enter_quickack_mode(sk);
5694 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5695 TCP_DELACK_MAX, TCP_RTO_MAX);
5696
5697discard:
-5698 __kfree_skb(skb);
+5694 tcp_drop(sk, skb);
5699 return 0;
5700 } else {
5701 tcp_send_ack(sk);
5702 }
5703 return -1;
5704 }
5705
5706 /* No ACK in the segment */

--- 90 unchanged lines hidden ---

5797{
5798 struct tcp_sock *tp = tcp_sk(sk);
5799 struct inet_connection_sock *icsk = inet_csk(sk);
5800 const struct tcphdr *th = tcp_hdr(skb);
5801 struct request_sock *req;
5802 int queued = 0;
5803 bool acceptable;
5804
-5805 tp->rx_opt.saw_tstamp = 0;
-5806
5807 switch (sk->sk_state) {
5808 case TCP_CLOSE:
5809 goto discard;
5810
5811 case TCP_LISTEN:
5812 if (th->ack)
5813 return 1;
5814
5815 if (th->rst)
5816 goto discard;
5817
5818 if (th->syn) {
5819 if (th->fin)
5820 goto discard;
5821 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
5822 return 1;
5823
5824 /* Now we have several options: In theory there is
5825 * nothing else in the frame. KA9Q has an option to
5826 * send data with the syn, BSD accepts data with the
5827 * syn up to the [to be] advertised window and
5828 * Solaris 2.1 gives you a protocol error. For now
5829 * we just ignore it, that fits the spec precisely
5830 * and avoids incompatibilities. It would be nice in
5831 * future to drop through and process the data.
5832 *
5833 * Now that TTCP is starting to be used we ought to
5834 * queue this data.
5835 * But, this leaves one open to an easy denial of
5836 * service attack, and SYN cookies can't defend
5837 * against this problem. So, we drop the data
5838 * in the interest of security over speed unless
5839 * it's still in use.
5840 */
-5841 kfree_skb(skb);
+5818 consume_skb(skb);
5842 return 0;
5843 }
5844 goto discard;
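On the kfree_skb()/consume_skb() change above: both release the skb, but consume_skb() marks the free as intentional rather than a drop (it does not trigger the drop-monitor path), which fits a listener deliberately ignoring data carried on a SYN.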
5845
5846 case TCP_SYN_SENT:
+5824 tp->rx_opt.saw_tstamp = 0;
5847 queued = tcp_rcv_synsent_state_process(sk, skb, th);
5848 if (queued >= 0)
5849 return queued;
5850
5851 /* Do step6 onward by hand. */
5852 tcp_urg(sk, skb, th);
5853 __kfree_skb(skb);
5854 tcp_data_snd_check(sk);
5855 return 0;
5856 }
5857
+5836 tp->rx_opt.saw_tstamp = 0;
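With the two '+' lines (5824 and 5836), clearing tp->rx_opt.saw_tstamp moves from the top of tcp_rcv_state_process() (the removed line 5805) to just before the code that actually parses incoming options: the TCP_SYN_SENT case and the post-switch segment processing. Early-exit states such as TCP_CLOSE and TCP_LISTEN no longer touch it.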
5858 req = tp->fastopen_rsk;
5859 if (req) {
5860 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
5861 sk->sk_state != TCP_FIN_WAIT1);
5862
5863 if (!tcp_check_req(sk, skb, req, true))
5864 goto discard;
5865 }

--- 185 unchanged lines hidden ---

6051 /* tcp_data could move socket to TIME-WAIT */
6052 if (sk->sk_state != TCP_CLOSE) {
6053 tcp_data_snd_check(sk);
6054 tcp_ack_snd_check(sk);
6055 }
6056
6057 if (!queued) {
6058discard:
-6059 __kfree_skb(skb);
+6038 tcp_drop(sk, skb);
6060 }
6061 return 0;
6062}
6063EXPORT_SYMBOL(tcp_rcv_state_process);
6064
6065static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
6066{
6067 struct inet_request_sock *ireq = inet_rsk(req);

--- 260 unchanged lines hidden ---

6328 tcp_rsk(req)->txhash = net_tx_rndhash();
6329 tcp_openreq_init_rwin(req, sk, dst);
6330 if (!want_cookie) {
6331 tcp_reqsk_record_syn(sk, req, skb);
6332 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
6333 }
6334 if (fastopen_sk) {
6335 af_ops->send_synack(fastopen_sk, dst, &fl, req,
-6336 &foc, false);
+6315 &foc, TCP_SYNACK_FASTOPEN);
6337 /* Add the child socket directly into the accept queue */
6338 inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
6339 sk->sk_data_ready(sk);
6340 bh_unlock_sock(fastopen_sk);
6341 sock_put(fastopen_sk);
6342 } else {
6343 tcp_rsk(req)->tfo_listener = false;
6344 if (!want_cookie)
6345 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-6346 af_ops->send_synack(sk, dst, &fl, req,
-6347 &foc, !want_cookie);
-6348 if (want_cookie)
-6349 goto drop_and_free;
+6325 af_ops->send_synack(sk, dst, &fl, req, &foc,
+6326 !want_cookie ? TCP_SYNACK_NORMAL :
+6327 TCP_SYNACK_COOKIE);
+6328 if (want_cookie) {
+6329 reqsk_free(req);
+6330 return 0;
+6331 }
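The bool previously passed to send_synack() is replaced by a three-valued type distinguishing the normal, SYN-cookie, and Fast Open paths. A sketch of the assumed declaration:

	enum tcp_synack_type {
		TCP_SYNACK_NORMAL,
		TCP_SYNACK_COOKIE,
		TCP_SYNACK_FASTOPEN,
	};

Note also that the cookie path now frees the request_sock and returns immediately instead of jumping to drop_and_free.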
6350 }
6351 reqsk_put(req);
6352 return 0;
6353
6354drop_and_release:
6355 dst_release(dst);
6356drop_and_free:
6357 reqsk_free(req);
6358drop:
-6359 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+6341 tcp_listendrop(sk);
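tcp_listendrop() folds the LISTENDROPS MIB bump together with per-listener drop accounting. A minimal sketch of the assumed helper:

	static inline void tcp_listendrop(const struct sock *sk)
	{
		/* listeners are const-qualified in most call paths; cast to bump the counter */
		atomic_inc(&((struct sock *)sk)->sk_drops);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	}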
6360 return 0;
6361}
6362EXPORT_SYMBOL(tcp_conn_request);