tcp_ipv4.c (ade9628ed049242fac5dd94730881f8c5e244634) → tcp_ipv4.c (4f693b55c3d2d2239b8a0094b518a1e533cf75d5)
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system. INET is implemented using the BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
--- 1605 unchanged lines hidden ---

1614                 }
1615         }
1616         return 0;
1617 }
1618
1619 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1620 {
1621         u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1622         struct skb_shared_info *shinfo;
1623         const struct tcphdr *th;
1624         struct tcphdr *thtail;
1625         struct sk_buff *tail;
1626         unsigned int hdrlen;
1627         bool fragstolen;
1628         u32 gso_segs;
1629         int delta;
1630
1631         /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1632          * we can fix skb->truesize to its real value to avoid future drops.
1633          * This is valid because skb is not yet charged to the socket.
1634          * It has been noticed pure SACK packets were sometimes dropped
1635          * (if cooked by drivers without copybreak feature).
1636          */
1637         skb_condense(skb);
1638
1639         skb_dst_drop(skb);
1640
1641         if (unlikely(tcp_checksum_complete(skb))) {
1642                 bh_unlock_sock(sk);
1643                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1644                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1645                 return true;
1646         }
1647
1648         /* Attempt coalescing to last skb in backlog, even if we are
1649          * above the limits.
1650          * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1651          */
1652         th = (const struct tcphdr *)skb->data;
1653         hdrlen = th->doff * 4;
1654         shinfo = skb_shinfo(skb);
1655
1656         if (!shinfo->gso_size)
1657                 shinfo->gso_size = skb->len - hdrlen;
1658
1659         if (!shinfo->gso_segs)
1660                 shinfo->gso_segs = 1;
1661
1662         tail = sk->sk_backlog.tail;
1663         if (!tail)
1664                 goto no_coalesce;
1665         thtail = (struct tcphdr *)tail->data;
1666
1667         if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1668             TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1669             ((TCP_SKB_CB(tail)->tcp_flags |
1670               TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
1671             ((TCP_SKB_CB(tail)->tcp_flags ^
1672               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1673 #ifdef CONFIG_TLS_DEVICE
1674             tail->decrypted != skb->decrypted ||
1675 #endif
1676             thtail->doff != th->doff ||
1677             memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1678                 goto no_coalesce;
1679
1680         __skb_pull(skb, hdrlen);
1681         if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1682                 thtail->window = th->window;
1683
1684                 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1685
1686                 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1687                         TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1688
1689                 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1690
1691                 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1692                         TCP_SKB_CB(tail)->has_rxtstamp = true;
1693                         tail->tstamp = skb->tstamp;
1694                         skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1695                 }
1696
1697                 /* Not as strict as GRO. We only need to carry mss max value */
1698                 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1699                                                  skb_shinfo(tail)->gso_size);
1700
1701                 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1702                 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1703
1704                 sk->sk_backlog.len += delta;
1705                 __NET_INC_STATS(sock_net(sk),
1706                                 LINUX_MIB_TCPBACKLOGCOALESCE);
1707                 kfree_skb_partial(skb, fragstolen);
1708                 return false;
1709         }
1710         __skb_push(skb, hdrlen);
1711
1712 no_coalesce:
1713         /* Only socket owner can try to collapse/prune rx queues
1714          * to reduce memory overhead, so add a little headroom here.
1715          * Few sockets backlog are possibly concurrently non empty.
1716          */
1717         limit += 64*1024;
1718
1719         if (unlikely(sk_add_backlog(sk, skb, limit))) {
1720                 bh_unlock_sock(sk);
1721                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1722                 return true;
1723         }
1724         return false;
1725 }
1726 EXPORT_SYMBOL(tcp_add_backlog);

--- 992 unchanged lines hidden ---
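The coalescing path added to tcp_add_backlog() above is gated by a pure header comparison before any data is merged. The standalone sketch below is illustrative userspace C, not kernel code; the names seg_meta, backlog_can_coalesce() and merged_gso_segs() are invented for this example. It models the eligibility checks guarding skb_try_coalesce(): the new segment must start exactly where the backlog tail ends, the IP DSCP/ECN byte must match, neither segment may carry URG, the ECE/CWR bits must not differ, and the TCP option bytes (same doff) must be byte-identical. It also mirrors the gso_segs accounting, which saturates at 0xFFFF.

```c
/* Standalone model of the coalescing eligibility test in tcp_add_backlog().
 * Illustrative userspace C only; seg_meta and the two helpers are made-up
 * names, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

struct seg_meta {
	uint32_t seq;          /* first sequence number of the segment        */
	uint32_t end_seq;      /* sequence number just past the last byte     */
	uint8_t  ip_dsfield;   /* IP DSCP/ECN byte                            */
	uint8_t  tcp_flags;    /* TCP flag byte                               */
	uint8_t  doff;         /* TCP header length in 32-bit words           */
	uint8_t  options[40];  /* raw option bytes (doff * 4 - 20 of them)    */
	uint16_t gso_segs;     /* how many MSS-sized segments it represents   */
};

/* Mirror of the header checks that precede skb_try_coalesce() above. */
static bool backlog_can_coalesce(const struct seg_meta *tail,
				 const struct seg_meta *skb)
{
	if (tail->end_seq != skb->seq)              /* must be contiguous     */
		return false;
	if (tail->ip_dsfield != skb->ip_dsfield)    /* same DSCP/ECN          */
		return false;
	if ((tail->tcp_flags | skb->tcp_flags) & TCPHDR_URG)
		return false;                       /* never merge URG data   */
	if ((tail->tcp_flags ^ skb->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR))
		return false;                       /* ECN bits must agree    */
	if (tail->doff != skb->doff)                /* same option length     */
		return false;
	/* option bytes after the 20-byte base header must match exactly */
	return !memcmp(tail->options, skb->options, tail->doff * 4 - 20);
}

/* gso_segs accounting from the merge path: sum, saturated at 16 bits. */
static uint16_t merged_gso_segs(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;

	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	struct seg_meta tail = { .seq = 1000, .end_seq = 2448,
				 .doff = 5, .gso_segs = 1 };
	struct seg_meta next = { .seq = 2448, .end_seq = 3896,
				 .doff = 5, .gso_segs = 1 };

	printf("coalesce: %s, merged gso_segs: %u\n",
	       backlog_can_coalesce(&tail, &next) ? "yes" : "no",
	       merged_gso_segs(tail.gso_segs, next.gso_segs));
	return 0;
}
```

If any of these checks fail, the code above falls through to the no_coalesce: label and the segment is queued on the backlog as its own skb via sk_add_backlog(), exactly as it was before this change; coalescing is purely an optimization of backlog memory and processing cost.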