--- tcp.h (5800571960234f9d1f1011bf135799b2014d4268)
+++ tcp.h (a842fe1425cb20f457abd3f8ef98b468f83ca98b)
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system. INET is implemented using the BSD Socket
  *		interface as the means of communication with the user level.
  *
  *		Definitions for the TCP module.
  *

--- 1596 unchanged lines hidden ---

 	size_t size;
 	int copied;	/* queued in tcp_connect() */
 	struct ubuf_info *uarg;
 };
 void tcp_free_fastopen_req(struct tcp_sock *tp);
 void tcp_fastopen_destroy_cipher(struct sock *sk);
 void tcp_fastopen_ctx_destroy(struct net *net);
 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
-			      void *key, unsigned int len);
+			      void *primary_key, void *backup_key,
+			      unsigned int len);
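
The widened prototype installs a primary key and an optional backup key in one call. A minimal caller sketch, assuming len is the length of a single key, that backup_key may be NULL when only one key is wanted, and that a NULL sk targets the net-wide context (assumptions; the diff shows only the prototype):

	/* Sketch only: rotate the net-wide TFO primary key, demoting the
	 * old primary to backup so cookies minted under it still validate
	 * during the rotation window.
	 */
	static int tfo_rotate_key(struct net *net,
				  u8 new_key[TCP_FASTOPEN_KEY_LENGTH],
				  u8 old_key[TCP_FASTOPEN_KEY_LENGTH])
	{
		return tcp_fastopen_reset_cipher(net, NULL, new_key, old_key,
						 TCP_FASTOPEN_KEY_LENGTH);
	}
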
 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 			      struct request_sock *req,
 			      struct tcp_fastopen_cookie *foc,
 			      const struct dst_entry *dst);
 void tcp_fastopen_init_key_once(struct net *net);
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
 			       struct tcp_fastopen_cookie *cookie);
 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
 #define TCP_FASTOPEN_KEY_LENGTH 16
+#define TCP_FASTOPEN_KEY_MAX 2
+#define TCP_FASTOPEN_KEY_BUF_LENGTH \
+	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

 /* Fastopen key context */
 struct tcp_fastopen_context {
-	struct crypto_cipher *tfm;
-	__u8 key[TCP_FASTOPEN_KEY_LENGTH];
+	struct crypto_cipher *tfm[TCP_FASTOPEN_KEY_MAX];
+	__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
 	struct rcu_head rcu;
 };

 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

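With TCP_FASTOPEN_KEY_MAX at 2, the context now carries one cipher handle per key plus a flat 32-byte buffer (2 x TCP_FASTOPEN_KEY_LENGTH). A small sketch of the implied layout, assuming the key material is packed back to back at 16-byte offsets (an assumption; only the declarations appear above):

	/* Sketch: if key[] packs the key material contiguously, slot i
	 * starts i * TCP_FASTOPEN_KEY_LENGTH bytes into the buffer, so
	 * slot 0 would be the primary key and slot 1 the backup.
	 */
	static const __u8 *tfo_key_slot(const struct tcp_fastopen_context *ctx,
					int i)
	{
		return ctx->key + i * TCP_FASTOPEN_KEY_LENGTH;
	}
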
+/* Caller needs to wrap with rcu_read_(un)lock() */
+static inline
+struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
+{
+	struct tcp_fastopen_context *ctx;
+
+	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
+	if (!ctx)
+		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
+	return ctx;
+}
+
+static inline
+bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
+			       const struct tcp_fastopen_cookie *orig)
+{
+	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
+	    orig->len == foc->len &&
+	    !memcmp(orig->val, foc->val, foc->len))
+		return true;
+	return false;
+}
+
+static inline
+int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
+{
+	if (ctx->tfm[1])
+		return 2;
+	return 1;
+}
+
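As the comment on tcp_fastopen_get_ctx() requires, lookups must sit inside an RCU read-side section; tcp_fastopen_context_len() then reports whether one or two keys are installed by testing tfm[1]. A usage sketch with an illustrative helper name:

	/* Sketch: count the TFO keys visible to a socket. The lookup
	 * prefers the per-listener context and falls back to the
	 * net-wide one, as tcp_fastopen_get_ctx() does internally.
	 */
	static int tfo_num_keys(const struct sock *sk)
	{
		struct tcp_fastopen_context *ctx;
		int n = 0;

		rcu_read_lock();
		ctx = tcp_fastopen_get_ctx(sk);
		if (ctx)
			n = tcp_fastopen_context_len(ctx);
		rcu_read_unlock();
		return n;
	}
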
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
  */
 enum tcp_chrono {
 	TCP_CHRONO_UNSPEC,
 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */

--- 546 unchanged lines hidden ---

 #if IS_ENABLED(CONFIG_TLS_DEVICE)
 void clean_acked_data_enable(struct inet_connection_sock *icsk,
 			     void (*cad)(struct sock *sk, u32 ack_seq));
 void clean_acked_data_disable(struct inet_connection_sock *icsk);
 void clean_acked_data_flush(void);
 #endif

+DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
+static inline void tcp_add_tx_delay(struct sk_buff *skb,
+				    const struct tcp_sock *tp)
+{
+	if (static_branch_unlikely(&tcp_tx_delay_enabled))
+		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
+}
+
+static inline void tcp_set_tx_time(struct sk_buff *skb,
+				   const struct sock *sk)
+{
+	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
+		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
+			    tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
+
+		skb->skb_mstamp_ns = tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
+	}
+}
+
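Both helpers are gated on the tcp_tx_delay_enabled static key, so flows that never set a delay pay only a patched-out branch. A worked sketch of the arithmetic, assuming a flow already configured with a 100 microsecond delay (the diff does not show how the key gets enabled or the delay set):

	/* Sketch: with the static key on and tp->tcp_tx_delay == 100
	 * (microseconds), this stamps the skb to depart
	 * 100 * NSEC_PER_USEC = 100,000 ns after "now".
	 */
	static void stamp_delayed_skb(struct sk_buff *skb,
				      const struct tcp_sock *tp)
	{
		skb->skb_mstamp_ns = tcp_clock_ns();	/* baseline: now */
		tcp_add_tx_delay(skb, tp);		/* += delay in ns */
	}
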
 #endif /* _TCP_H */