/openbmc/linux/net/ipv4/

bpf_tcp_ca.c
     98  case offsetof(struct tcp_sock, snd_cwnd):          in bpf_tcp_ca_btf_struct_access()
     99  end = offsetofend(struct tcp_sock, snd_cwnd);      in bpf_tcp_ca_btf_struct_access()
    101  case offsetof(struct tcp_sock, snd_cwnd_cnt):      in bpf_tcp_ca_btf_struct_access()
    102  end = offsetofend(struct tcp_sock, snd_cwnd_cnt);  in bpf_tcp_ca_btf_struct_access()
    104  case offsetof(struct tcp_sock, snd_ssthresh):      in bpf_tcp_ca_btf_struct_access()
    105  end = offsetofend(struct tcp_sock, snd_ssthresh);  in bpf_tcp_ca_btf_struct_access()
    107  case offsetof(struct tcp_sock, ecn_flags):         in bpf_tcp_ca_btf_struct_access()
    108  end = offsetofend(struct tcp_sock, ecn_flags);     in bpf_tcp_ca_btf_struct_access()
    110  case offsetof(struct tcp_sock, app_limited):       in bpf_tcp_ca_btf_struct_access()
    111  end = offsetofend(struct tcp_sock, app_limited);   in bpf_tcp_ca_btf_struct_access()
    [all …]
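The bpf_tcp_ca.c hits above are the write-access whitelist that BPF congestion-control programs are checked against: a store is accepted only when it falls entirely inside one of the listed tcp_sock fields. Below is a minimal sketch of that shape, using a reduced stand-in struct (tcp_sock_stub) and a made-up helper name so it stands alone; the real bpf_tcp_ca_btf_struct_access() works on the full struct tcp_sock and the verifier's BTF access information.

    #include <stdbool.h>
    #include <stddef.h>

    /* Reduced stand-in for the tcp_sock fields the real check whitelists;
     * in the kernel this is the full struct tcp_sock from <linux/tcp.h>. */
    struct tcp_sock_stub {
            unsigned int  snd_cwnd;
            unsigned int  snd_cwnd_cnt;
            unsigned int  snd_ssthresh;
            unsigned char ecn_flags;
            unsigned int  app_limited;
    };

    /* offsetofend() is a kernel macro; defined here so the sketch stands alone. */
    #ifndef offsetofend
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
    #endif

    /* Shape of the check: a write is legal only if it lands entirely inside one
     * of the whitelisted fields; every other tcp_sock field stays read-only. */
    static bool tcp_ca_write_allowed(size_t off, size_t size)
    {
            size_t end;

            switch (off) {
            case offsetof(struct tcp_sock_stub, snd_cwnd):
                    end = offsetofend(struct tcp_sock_stub, snd_cwnd);
                    break;
            case offsetof(struct tcp_sock_stub, snd_ssthresh):
                    end = offsetofend(struct tcp_sock_stub, snd_ssthresh);
                    break;
            case offsetof(struct tcp_sock_stub, ecn_flags):
                    end = offsetofend(struct tcp_sock_stub, ecn_flags);
                    break;
            default:
                    return false;
            }
            return off + size <= end;
    }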
tcp_recovery.c
      7  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_reo_wnd()
     32  s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)  in tcp_rack_skb_timeout()
     60  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_detect_loss()
     97  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_mark_lost()
    118  void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,  in tcp_rack_advance()
    151  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_reo_timeout()
    189  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rack_update_reo_wnd()
    220  struct tcp_sock *tp = tcp_sk(sk);  in tcp_newreno_mark_lost()
tcp_input.c
    252  struct tcp_sock *tp = tcp_sk(sk);  in tcp_measure_rcv_mss()
    346  static void tcp_ecn_queue_cwr(struct tcp_sock *tp)  in tcp_ecn_queue_cwr()
    366  static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)  in tcp_ecn_withdraw_cwr()
    373  struct tcp_sock *tp = tcp_sk(sk);  in __tcp_ecn_check_ce()
    409  static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)  in tcp_ecn_rcv_synack()
    415  static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)  in tcp_ecn_rcv_syn()
    421  static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)  in tcp_ecn_rcv_ecn_echo()
    435  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_sndbuf_expand()
    494  const struct tcp_sock *tp = tcp_sk(sk);  in __tcp_grow_window()
    531  struct tcp_sock *tp = tcp_sk(sk);  in tcp_grow_window()
    [all …]
tcp_dctcp.c
     81  static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)  in dctcp_reset()
     91  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_init()
    121  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ssthresh()
    129  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_update_alpha()
    177  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_react_to_loss()
    219  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_get_info()
    246  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_cwnd_undo()
tcp_output.c
     53  void tcp_mstamp_refresh(struct tcp_sock *tp)  in tcp_mstamp_refresh()
     68  struct tcp_sock *tp = tcp_sk(sk);  in tcp_event_new_data_sent()
     97  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_acceptable_seq()
    123  struct tcp_sock *tp = tcp_sk(sk);  in tcp_advertise_mss()
    144  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cwnd_restart()
    161  static void tcp_event_data_sent(struct tcp_sock *tp,  in tcp_event_data_sent()
    182  struct tcp_sock *tp = tcp_sk(sk);  in tcp_event_ack_sent()
    260  struct tcp_sock *tp = tcp_sk(sk);  in tcp_select_window()
    323  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_synack()
    336  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_syn()
    [all …]
tcp_bbr.c
    268  struct tcp_sock *tp = tcp_sk(sk);  in bbr_init_pacing_rate_from_rtt()
    287  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_pacing_rate()
    305  struct tcp_sock *tp = tcp_sk(sk);  in bbr_tso_segs_goal()
    322  struct tcp_sock *tp = tcp_sk(sk);  in bbr_save_cwnd()
    333  struct tcp_sock *tp = tcp_sk(sk);  in bbr_cwnd_event()
    438  struct tcp_sock *tp = tcp_sk(sk);  in bbr_packets_in_net_at_edt()
    482  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_cwnd_to_recover_or_restore()
    521  struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_cwnd()
    556  struct tcp_sock *tp = tcp_sk(sk);  in bbr_is_next_cycle_phase()
    592  struct tcp_sock *tp = tcp_sk(sk);  in bbr_advance_cycle_phase()
    [all …]
tcp_timer.c
    102  struct tcp_sock *tp = tcp_sk(sk);  in tcp_out_of_resources()
    232  struct tcp_sock *tp = tcp_sk(sk);  in tcp_write_timeout()
    297  struct tcp_sock *tp = tcp_sk(sk);  in tcp_delack_timer_handler()
    368  struct tcp_sock *tp = tcp_sk(sk);  in tcp_probe_timer()
    417  struct tcp_sock *tp = tcp_sk(sk);  in tcp_update_rto_stats()
    434  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fastopen_synack_timer()
    471  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_rtx_probe0_timed_out()
    513  struct tcp_sock *tp = tcp_sk(sk);  in tcp_retransmit_timer()
    744  struct tcp_sock *tp = tcp_sk(sk);  in tcp_keepalive_timer()
    829  struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);  in tcp_compressed_ack_kick()
tcp_cdg.c
    143  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_hystart_update()
    244  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_backoff()
    265  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_cong_avoid()
    302  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_acked()
    331  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_ssthresh()
    348  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_cwnd_event()
    376  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cdg_init()
tcp_rate.c
     42  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_skb_sent()
     83  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_skb_delivered()
    120  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_gen()
    196  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_check_app_limited()
tcp_highspeed.c
    102  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_init()
    114  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_cong_avoid()
    153  const struct tcp_sock *tp = tcp_sk(sk);  in hstcp_ssthresh()
tcp_scalable.c
     20  struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_cong_avoid()
     36  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_ssthresh()
tcp_westwood.c
    165  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_fast_bw()
    182  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_acked_count()
    219  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_bw_rttmin()
    242  struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_event()
tcp_illinois.c
     59  struct tcp_sock *tp = tcp_sk(sk);  in rtt_reset()
    224  struct tcp_sock *tp = tcp_sk(sk);  in update_params()
    262  struct tcp_sock *tp = tcp_sk(sk);  in tcp_illinois_cong_avoid()
    297  struct tcp_sock *tp = tcp_sk(sk);  in tcp_illinois_ssthresh()
tcp_yeah.c
     43  struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_init()
     60  struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_cong_avoid()
    189  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_ssthresh()
tcp_vegas.c
     73  const struct tcp_sock *tp = tcp_sk(sk);  in vegas_enable()
    160  static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)  in tcp_vegas_ssthresh()
    167  struct tcp_sock *tp = tcp_sk(sk);  in tcp_vegas_cong_avoid()
tcp_minisocks.c
    246  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_time_wait_init()
    283  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_time_wait()
    390  const struct tcp_sock *tp = tcp_sk(sk_listener);  in tcp_openreq_init_rwin()
    425  static void tcp_ecn_openreq_child(struct tcp_sock *tp,  in tcp_ecn_openreq_child()
    460  static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,  in smc_check_reset_syn_req()
    462  struct tcp_sock *newtp)  in smc_check_reset_syn_req()
    489  const struct tcp_sock *oldtp;  in tcp_create_openreq_child()
    490  struct tcp_sock *newtp;  in tcp_create_openreq_child()
/openbmc/linux/tools/testing/selftests/bpf/progs/

tcp_ca_write_sk_pacing.c
     14  static inline struct tcp_sock *tcp_sk(const struct sock *sk)  in tcp_sk()
     16  return (struct tcp_sock *)sk;  in tcp_sk()
     19  static inline unsigned int tcp_left_out(const struct tcp_sock *tp)  in tcp_left_out()
     24  static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)  in tcp_packets_in_flight()
     44  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
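tcp_ca_write_sk_pacing.c shows the pattern the BPF selftests use for reading tcp_sock state: struct sock is the first member of the real tcp_sock composition chain, so the program simply casts the pointer and re-creates the small bookkeeping helpers it needs. The sketch below follows that pattern but is hedged: the local struct mirror and its field subset are illustrative (the actual program gets the full definition from vmlinux.h or the shared helpers).

    #include <linux/types.h>

    /* Reduced, hypothetical mirror of the kernel's struct tcp_sock: only the
     * fields the helpers below touch. preserve_access_index lets libbpf/CO-RE
     * relocate these offsets against the running kernel's BTF. */
    struct sock;

    struct tcp_sock {
            __u32 snd_cwnd;
            __u32 packets_out;
            __u32 retrans_out;
            __u32 sacked_out;
            __u32 lost_out;
    } __attribute__((preserve_access_index));

    /* struct sock is the first member of the real tcp_sock, so a cast suffices. */
    static inline struct tcp_sock *tcp_sk(const struct sock *sk)
    {
            return (struct tcp_sock *)sk;
    }

    /* Segments believed to have left the network: SACKed plus declared lost. */
    static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
    {
            return tp->sacked_out + tp->lost_out;
    }

    /* In-flight estimate: sent, minus left-out, plus what was retransmitted. */
    static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
    {
            return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
    }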
bpf_dctcp.c
     51  static __always_inline void dctcp_reset(const struct tcp_sock *tp,  in dctcp_reset()
     63  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
    111  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
    120  const struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG()
    150  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_react_to_loss()
    169  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ece_ack_cwr()
tcp_ca_incompl_cong_ops.c
     10  static inline struct tcp_sock *tcp_sk(const struct sock *sk)  in tcp_sk()
     12  return (struct tcp_sock *)sk;  in tcp_sk()
tcp_ca_update.c
     13  static inline struct tcp_sock *tcp_sk(const struct sock *sk)  in tcp_sk()
     15  return (struct tcp_sock *)sk;  in tcp_sk()
cgrp_ls_attach_cgroup.c
     27  struct tcp_sock *tcp_sk;  in set_cookie()
     56  struct tcp_sock *tcp_sk;  in update_cookie_sockops()
/openbmc/linux/tools/testing/selftests/bpf/

bpf_tcp_helpers.h
     62  struct tcp_sock {
    101  static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk)  in tcp_sk()
    103  return (struct tcp_sock *)sk;  in tcp_sk()
    200  static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp)  in tcp_in_slow_start()
    207  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_is_cwnd_limited()
    230  extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
    231  extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
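bpf_tcp_helpers.h mirrors just enough of struct tcp_sock for the tests and declares tcp_slow_start()/tcp_cong_avoid_ai() as kernel symbols, so a test congestion controller can be written much like an in-kernel one. Below is a hedged sketch of a cong_avoid hook built on those helpers; the program name is made up, the registration of the struct_ops map is omitted, and it assumes compilation inside the selftests tree where this header and the BPF_STRUCT_OPS macro are available.

    /* Assumes the selftests tree: bpf_tcp_helpers.h supplies the tcp_sock
     * mirror, tcp_sk(), tcp_in_slow_start(), tcp_is_cwnd_limited(), the
     * BPF_STRUCT_OPS macro, and the ksym declarations used below. */
    #include "bpf_tcp_helpers.h"
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    /* Reno-style congestion avoidance: exponential growth below ssthresh via
     * tcp_slow_start(), additive increase (roughly one MSS per cwnd of ACKs)
     * above it. */
    void BPF_STRUCT_OPS(sketch_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            /* don't grow cwnd if the application, not the window, is the limit */
            if (!tcp_is_cwnd_limited(sk))
                    return;

            if (tcp_in_slow_start(tp)) {
                    acked = tcp_slow_start(tp, acked);
                    if (!acked)
                            return;
            }
            tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
    }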
/openbmc/linux/include/net/

tcp.h
    395  void tcp_clear_retrans(struct tcp_sock *tp);
    593  void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
    665  static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)  in tcp_bound_to_half_wnd()
    712  static inline u32 __tcp_set_rto(const struct tcp_sock *tp)  in __tcp_set_rto()
    717  static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)  in __tcp_fast_path_on()
    728  static inline void tcp_fast_path_on(struct tcp_sock *tp)  in tcp_fast_path_on()
    735  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fast_path_check()
    768  static inline u32 tcp_min_rtt(const struct tcp_sock *tp)  in tcp_min_rtt()
    777  static inline u32 tcp_receive_window(const struct tcp_sock *tp)  in tcp_receive_window()
    818  static inline u32 tcp_time_stamp(const struct tcp_sock *tp)  in tcp_time_stamp()
    [all …]
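Most of these include/net/tcp.h matches are small inline accessors that the .c files above build on. tcp_receive_window(), for instance, is just the distance from rcv_nxt to the right edge of the window last advertised (rcv_wup + rcv_wnd), clamped at zero. A self-contained sketch with an assumed field subset and made-up names (tcp_sock_rx, tcp_receive_window_sketch):

    #include <stdint.h>

    /* Assumed subset of tcp_sock receive-side fields (names follow the kernel). */
    struct tcp_sock_rx {
            uint32_t rcv_nxt;   /* next sequence number we expect to receive     */
            uint32_t rcv_wup;   /* rcv_nxt when the last window update was sent  */
            uint32_t rcv_wnd;   /* receive window advertised in that update      */
    };

    /* Space still usable in the window we last advertised, clamped at zero;
     * mirrors the shape of tcp_receive_window() in include/net/tcp.h. */
    static uint32_t tcp_receive_window_sketch(const struct tcp_sock_rx *tp)
    {
            int32_t win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

            return win < 0 ? 0 : (uint32_t)win;
    }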
/openbmc/linux/include/linux/

tcp.h
    177  struct tcp_sock {
    485  #define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
    490  #define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
    529  static inline void tcp_move_syn(struct tcp_sock *tp,  in tcp_move_syn()
    536  static inline void tcp_saved_syn_free(struct tcp_sock *tp)  in tcp_saved_syn_free()
    552  static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)  in tcp_mss_clamp()
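Unlike the selftest cast, the in-kernel tcp_sk() at include/linux/tcp.h line 485 is a container_of() over the embedded socket chain: struct sock sits inside struct inet_sock, which sits inside struct inet_connection_sock, which is the first member of struct tcp_sock. A self-contained sketch of that layering with simplified struct bodies and made-up names (tcp_sock_demo, tcp_sk_demo):

    #include <stddef.h>

    /* Simplified mirror of the real nesting; bodies are placeholders, only the
     * containment structure matters here. */
    struct sock                  { int sk_state; };
    struct inet_sock             { struct sock sk; };
    struct inet_connection_sock  { struct inet_sock icsk_inet; };
    struct tcp_sock_demo         { struct inet_connection_sock inet_conn; unsigned int snd_cwnd; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Same shape as the kernel's tcp_sk(): walk back from the embedded
     * struct sock pointer to the enclosing tcp_sock. */
    static struct tcp_sock_demo *tcp_sk_demo(struct sock *sk)
    {
            return container_of(sk, struct tcp_sock_demo, inet_conn.icsk_inet.sk);
    }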
/openbmc/linux/net/mptcp/

fastopen.c
     14  struct tcp_sock *tp;  in mptcp_fastopen_subflow_synack_set_params()
     22  ssk = subflow->tcp_sock;  in mptcp_fastopen_subflow_synack_set_params()