/openbmc/linux/net/ipv4/
tcp_input.c
   361  if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)  in tcp_ecn_accept_cwr()
   717  if (TCP_SKB_CB(skb)->end_seq -  in tcp_rcv_rtt_measure_ts()
  1020  u32 end_seq, struct tcp_sacktag_state *state)  in tcp_dsack_seen() argument
  1024  if (!before(start_seq, end_seq))  in tcp_dsack_seen()
  1027  seq_len = end_seq - start_seq;  in tcp_dsack_seen()
  1033  else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)  in tcp_dsack_seen()
  1243  u32 start_seq, u32 end_seq)  in tcp_is_sackblock_valid() argument
  1246  if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))  in tcp_is_sackblock_valid()
  1263  if (after(end_seq, tp->snd_una))  in tcp_is_sackblock_valid()
  1270  if (!after(end_seq, tp->undo_marker))  in tcp_is_sackblock_valid()
  [all …]

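Note: the before()/after() calls that recur throughout these hits (tcp_dsack_seen(), tcp_is_sackblock_valid(), and most files below) are the kernel's wraparound-safe 32-bit sequence comparisons from include/net/tcp.h. A minimal userspace sketch of the idiom, using standard integer types in place of the kernel's u32/s32; sack_block_plausible() is an illustrative name, not a kernel function:

    #include <stdbool.h>
    #include <stdint.h>

    /* seq1 precedes seq2 in modulo-2^32 sequence space: the signed
       difference is negative as long as the two values are less than
       2^31 apart, which live TCP data guarantees. */
    static bool seq_before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    static bool seq_after(uint32_t seq1, uint32_t seq2)
    {
        return seq_before(seq2, seq1);
    }

    /* Illustrative: a SACK block [start_seq, end_seq) is only plausible
       if it is non-empty and does not reach beyond snd_nxt, the same
       shape as the first tcp_is_sackblock_valid() check above. */
    static bool sack_block_plausible(uint32_t start_seq, uint32_t end_seq,
                                     uint32_t snd_nxt)
    {
        return seq_before(start_seq, end_seq) && !seq_after(end_seq, snd_nxt);
    }
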
tcp_recovery.c
    78  tp->rack.end_seq, scb->end_seq))  in tcp_rack_detect_loss()
   118  void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,  in tcp_rack_advance() argument
   140  end_seq, tp->rack.end_seq)) {  in tcp_rack_advance()
   142  tp->rack.end_seq = end_seq;  in tcp_rack_advance()

tcp_minisocks.c
    26  static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)  in tcp_in_window() argument
    30  if (after(end_seq, s_win) && before(seq, e_win))  in tcp_in_window()
    32  return seq == e_win && seq == end_seq;  in tcp_in_window()
   110  !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
   124  !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||  in tcp_timewait_state_process()
   125  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {  in tcp_timewait_state_process()
   134  TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)  in tcp_timewait_state_process()
   139  tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_timewait_state_process()
   168  (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {  in tcp_timewait_state_process()
   742  if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_check_req()
  [all …]

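tcp_in_window(), shown in full above, accepts a segment whose half-open range [seq, end_seq) overlaps the receive window [s_win, e_win), plus the corner case of a zero-length segment sitting exactly at the right edge. A hedged standalone sketch of the same predicate with a couple of made-up example values; seq_before()/seq_after() repeat the wraparound-safe comparison sketched earlier:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

    /* Same shape as tcp_in_window(): does [seq, end_seq) overlap
       [s_win, e_win), or is it a zero-length segment at the right edge? */
    static bool in_window(uint32_t seq, uint32_t end_seq,
                          uint32_t s_win, uint32_t e_win)
    {
        if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
            return true;
        return seq == e_win && seq == end_seq;
    }

    int main(void)
    {
        /* Hypothetical numbers: window [1000, 2000), segment [1500, 1600). */
        printf("%d\n", in_window(1500, 1600, 1000, 2000)); /* 1: overlaps        */
        printf("%d\n", in_window(2000, 2100, 1000, 2000)); /* 0: beyond the edge */
        return 0;
    }
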
tcp_output.c
    71  WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  in tcp_event_new_data_sent()
   415  TCP_SKB_CB(skb)->end_seq = seq;  in tcp_init_nondata_skb()
   686  *ptr++ = htonl(sp[this_sack].end_seq);  in tcp_options_write()
  1404  if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)  in __tcp_transmit_skb()
  1454  WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);  in tcp_queue_skb()
  1598  TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_fragment()
  1599  TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;  in tcp_fragment()
  1625  if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {  in tcp_fragment()
  1951  tp->snd_sml = TCP_SKB_CB(skb)->end_seq;  in tcp_minshall_update()
  2119  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_snd_wnd_test() local
  [all …]

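Lines 1598-1599 of tcp_fragment() show the sequence bookkeeping when a queued skb is split: the new tail keeps the old end_seq and the head is truncated at the cut. A hedged sketch of that bookkeeping on a plain struct; the tail's starting sequence (head seq plus the split length) is implied by context rather than shown in the excerpt, and the names are illustrative:

    #include <stdint.h>

    /* Minimal stand-in for the sequence fields kept in TCP_SKB_CB(). */
    struct seq_range {
        uint32_t seq;      /* first sequence number covered     */
        uint32_t end_seq;  /* one past the last sequence number */
    };

    /* Split 'head' after 'len' payload bytes; 'tail' takes the rest,
       mirroring the end_seq/seq handoff visible in tcp_fragment(). */
    static void split_range(struct seq_range *head, struct seq_range *tail,
                            uint32_t len)
    {
        tail->seq = head->seq + len;    /* tail starts at the cut             */
        tail->end_seq = head->end_seq;  /* ...and keeps the old right edge    */
        head->end_seq = tail->seq;      /* head now ends where the tail starts */
    }
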
tcp_illinois.c
    49  u32 end_seq; /* right edge of current RTT */  member
    62  ca->end_seq = tp->snd_nxt;  in rtt_reset()
   265  if (after(ack, ca->end_seq))  in tcp_illinois_cong_avoid()

tcp_rate.c
    93  scb->end_seq, rs->last_end_seq)) {  in tcp_rate_skb_delivered()
    99  rs->last_end_seq = scb->end_seq;  in tcp_rate_skb_delivered()

tcp_cubic.c
   102  u32 end_seq; /* end_seq of the round */  member
   124  ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
   392  if (after(tp->snd_una, ca->end_seq))  in hystart_update()

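tcp_cubic.c (and its BPF mirror bpf_cubic.c further down, along with tcp_illinois.c above) uses end_seq to delimit one round of data for its RTT sampling and HyStart logic: the right edge is pinned to snd_nxt when the round starts, and the round is over once cumulative ACKs advance past it. A hedged sketch of that round-tracking pattern with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    struct round_tracker {
        uint32_t end_seq;   /* right edge of the current round */
    };

    /* Start a new round: everything sent so far belongs to it,
       as in bictcp_hystart_reset(). */
    static void round_reset(struct round_tracker *rt, uint32_t snd_nxt)
    {
        rt->end_seq = snd_nxt;
    }

    /* Called on every ACK; true once one full round of data has been
       cumulatively acknowledged, the shape of the hystart_update() test. */
    static bool round_done(const struct round_tracker *rt, uint32_t snd_una)
    {
        return seq_after(snd_una, rt->end_seq);
    }
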
tcp_fastopen.c
   174  if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)  in tcp_fastopen_add_skb()
   197  tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_fastopen_add_skb()
   353  bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;  in tcp_try_fastopen()

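The syn_data test at line 353 relies on a SYN consuming exactly one sequence number: a bare SYN spans [seq, seq + 1), so any end_seq beyond seq + 1 means payload arrived on the SYN, i.e. a Fast Open attempt. A small arithmetic sketch of that rule (illustrative helpers, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    /* end_seq covers sequence-consuming flags plus payload, so a SYN
       carrying 'payload_len' bytes spans [seq, seq + 1 + payload_len). */
    static uint32_t syn_end_seq(uint32_t seq, uint32_t payload_len)
    {
        return seq + 1 + payload_len;
    }

    /* Mirrors the tcp_try_fastopen() check: data rode on the SYN iff the
       segment spans more than the single SYN sequence number. */
    static bool syn_carries_data(uint32_t seq, uint32_t end_seq)
    {
        return end_seq != seq + 1;
    }
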
tcp.c
   667  tcb->seq = tcb->end_seq = tp->write_seq;  in tcp_skb_entail()
   941  if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {  in tcp_remove_empty_skb()
  1275  TCP_SKB_CB(skb)->end_seq += copy;  in tcp_sendmsg_locked()
  1502  WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),  in tcp_cleanup_rbuf()
  1504  tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);  in tcp_cleanup_rbuf()
  2809  u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;  in __tcp_close()
  3099  tp->duplicate_sack[0].end_seq = 0;  in tcp_disconnect()

tcp_ipv4.c
  1868  if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||  in tcp_add_backlog()
  1895  TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_add_backlog()
  1983  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +  in tcp_v4_fill_cb()

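tcp_v4_fill_cb() at line 1983 (and tcp_v6_fill_cb() in the ipv6 entry below) derives end_seq from the header; the comment on tcp_skb_cb in include/net/tcp.h later in this listing spells the formula out as SEQ + FIN + SYN + datalen, because SYN and FIN each occupy one sequence number. A hedged sketch of the computation on an illustrative parsed-header struct (not the kernel's struct tcphdr):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative parsed-header form; the kernel reads these bits
       straight out of struct tcphdr and the skb length. */
    struct parsed_tcp_hdr {
        uint32_t seq;          /* th->seq, host byte order        */
        bool     syn, fin;     /* th->syn, th->fin                */
        size_t   payload_len;  /* skb->len minus the TCP header   */
    };

    /* end_seq = SEQ + SYN + FIN + datalen: flags that consume
       sequence space count as one "byte" each. */
    static uint32_t compute_end_seq(const struct parsed_tcp_hdr *h)
    {
        return h->seq + h->syn + h->fin + (uint32_t)h->payload_len;
    }
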
/openbmc/linux/net/mptcp/
fastopen.c
    47  MPTCP_SKB_CB(skb)->end_seq = 0;  in mptcp_fastopen_subflow_synack_set_params()
    70  WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);  in __mptcp_fastopen_gen_msk_ackseq()
    73  MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);  in __mptcp_fastopen_gen_msk_ackseq()
    75  MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;  in __mptcp_fastopen_gen_msk_ackseq()

protocol.c
   147  to->len, MPTCP_SKB_CB(from)->end_seq);  in mptcp_try_coalesce()
   148  MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;  in mptcp_try_coalesce()
   164  if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)  in mptcp_ooo_try_coalesce()
   216  u64 seq, end_seq, max_seq;  in mptcp_data_queue_ofo() local
   220  end_seq = MPTCP_SKB_CB(skb)->end_seq;  in mptcp_data_queue_ofo()
   225  if (after64(end_seq, max_seq)) {  in mptcp_data_queue_ofo()
   229  (unsigned long long)end_seq - (unsigned long)max_seq,  in mptcp_data_queue_ofo()
   254  if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {  in mptcp_data_queue_ofo()
   270  if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {  in mptcp_data_queue_ofo()
   271  if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {  in mptcp_data_queue_ofo()
  [all …]

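MPTCP tracks its data-sequence space in 64 bits, so mptcp_data_queue_ofo() uses before64()/after64(), the 64-bit analogues of before()/after(), against MPTCP_SKB_CB(skb)->end_seq. A hedged sketch of the comparison plus the containment test suggested by lines 270-271; the assumption that the new mapping does not start before the queued one comes from the surrounding rbtree walk, not from the excerpt:

    #include <stdbool.h>
    #include <stdint.h>

    static bool before64(uint64_t seq1, uint64_t seq2)
    {
        return (int64_t)(seq1 - seq2) < 0;
    }

    static bool after64(uint64_t seq1, uint64_t seq2)
    {
        return before64(seq2, seq1);
    }

    /* [seq, end_seq) lies wholly inside the queued mapping
       [q_seq, q_end_seq), so the new data is a duplicate. */
    static bool covered_by(uint64_t seq, uint64_t end_seq,
                           uint64_t q_seq, uint64_t q_end_seq)
    {
        return !before64(seq, q_seq) && !after64(end_seq, q_end_seq);
    }
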
options.c
   409  subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;  in mptcp_syn_options()
   942  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&  in check_fully_established()
  1211  if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {  in mptcp_incoming_options()

subflow.c
  1094  TCP_SKB_CB(skb)->end_seq,  in get_mapping_status()
  1232  if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))  in mptcp_subflow_discard_data()

protocol.h
   130  u64 end_seq;  member

/openbmc/linux/net/netfilter/
nf_conntrack_seqadj.c
    94  if (after(ntohl(sack->end_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
    96  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
    99  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
   104  ntohl(sack->end_seq), ntohl(new_end_seq));  in nf_ct_sack_block_adjust()
   109  sack->end_seq, new_end_seq, false);  in nf_ct_sack_block_adjust()
   111  sack->end_seq = new_end_seq;  in nf_ct_sack_block_adjust()

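nf_ct_sack_block_adjust() rewrites the right edge of each SACK block when connection tracking has shifted a peer's sequence numbers (for example after a helper resized a payload): the edge is read out of the option in network byte order, shifted by the recorded offset, and written back. A hedged sketch of that transformation; the single-offset form below ignores the before/after-correction-point choice the kernel makes, and the checksum fixup is omitted:

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl() / htonl() */

    /* Wire format of one SACK block inside the TCP option. */
    struct sack_block {
        uint32_t start_seq;  /* network byte order */
        uint32_t end_seq;    /* network byte order */
    };

    /* Shift both edges of a SACK block by 'offset' sequence numbers,
       keeping the on-the-wire byte order, roughly as the excerpt does
       for end_seq. */
    static void sack_block_adjust(struct sack_block *sack, int32_t offset)
    {
        sack->start_seq = htonl(ntohl(sack->start_seq) - offset);
        sack->end_seq   = htonl(ntohl(sack->end_seq)   - offset);
    }
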
/openbmc/linux/net/tls/
tls_device.c
   177  if (info && !before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
   181  if (before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
   294  record->end_seq = tp->write_seq + record->len;  in tls_push_record()
   624  before(seq, info->end_seq - info->len)) {  in tls_get_record()
   647  last->end_seq))  in tls_get_record()
   656  if (before(seq, info->end_seq)) {  in tls_get_record()
   658  after(info->end_seq,  in tls_get_record()
   659  context->retransmit_hint->end_seq)) {  in tls_get_record()
  1133  start_marker_record->end_seq = tcp_sk(sk)->write_seq;  in tls_set_device_offload()

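The TLS device-offload code keys each outgoing record by the TCP sequence number one past its last byte: the record starts at end_seq - len (tls_record_start_seq() in include/net/tls.h below), tls_get_record() looks for the record whose range contains a given sequence, and tls_icsk_clean_acked() frees records once acked_seq is no longer before their end_seq. A hedged sketch of those two tests on a minimal record struct:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    /* Minimal stand-in for the fields the lookups above care about. */
    struct tls_offload_record {
        uint32_t end_seq;  /* sequence number just past the record */
        size_t   len;      /* record length in TCP payload bytes   */
    };

    static uint32_t record_start_seq(const struct tls_offload_record *rec)
    {
        return rec->end_seq - (uint32_t)rec->len;  /* as tls_record_start_seq() */
    }

    /* Does TCP sequence 'seq' fall inside this record? */
    static bool record_contains(const struct tls_offload_record *rec, uint32_t seq)
    {
        return !seq_before(seq, record_start_seq(rec)) &&
               seq_before(seq, rec->end_seq);
    }

    /* A record can be freed once the peer has acked everything in it. */
    static bool record_fully_acked(const struct tls_offload_record *rec,
                                   uint32_t acked_seq)
    {
        return !seq_before(acked_seq, rec->end_seq);
    }
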
/openbmc/linux/include/linux/
tcp.h
    99  __be32 end_seq;  member
   104  u32 end_seq;  member
   248  u32 end_seq; /* Ending TCP sequence of the skb */  member

/openbmc/linux/tools/testing/selftests/bpf/progs/
bpf_cubic.c
    89  __u32 end_seq; /* end_seq of the round */  member
   167  ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
   390  if (hystart && after(ack, ca->end_seq))  in BPF_STRUCT_OPS()

/openbmc/linux/include/net/
tls.h
   142  u32 end_seq;  member
   345  return rec->end_seq - rec->len;  in tls_record_start_seq()

tcp.h
   875  __u32 end_seq; /* SEQ + FIN + SYN + datalen */  member
  2212  extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,

/openbmc/linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/
chcr_ktls.c
  1744  tx_info->prev_seq = record->end_seq;  in chcr_end_part_handler()
  1999  tls_end_offset = record->end_seq - tcp_seq;  in chcr_ktls_xmit()
  2002  tcp_seq, record->end_seq, tx_info->prev_seq, data_len);  in chcr_ktls_xmit()
  2009  tx_max = record->end_seq -  in chcr_ktls_xmit()
  2054  tcp_seq = record->end_seq;  in chcr_ktls_xmit()

/openbmc/linux/drivers/infiniband/hw/irdma/
puda.c
  1157  u32 marker_seq, end_seq, blk_start;  in irdma_ieq_get_fpdu_len() local
  1181  end_seq = rcv_seq + total_len;  in irdma_ieq_get_fpdu_len()
  1182  while ((int)(marker_seq - end_seq) < 0) {  in irdma_ieq_get_fpdu_len()
  1184  end_seq += marker_len;  in irdma_ieq_get_fpdu_len()

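irdma_ieq_get_fpdu_len() steps MPA markers across the received range using the inline form of the same signed-difference trick, (int)(marker_seq - end_seq) < 0, i.e. a wraparound-safe "marker_seq precedes end_seq". A loosely hedged sketch of that loop shape; the marker spacing, the advance of marker_seq, and the counting are assumptions for illustration, only the comparison idiom and the end_seq growth are taken from the excerpt:

    #include <stdint.h>

    /* Illustrative constants: MPA marker length and placement period. */
    #define MARKER_LEN      4u
    #define MARKER_SPACING  512u

    /* Walk marker positions across [rcv_seq, rcv_seq + total_len): each
       marker stretches the range by MARKER_LEN, and the next marker is
       assumed to sit MARKER_SPACING further along the stream. */
    static uint32_t count_markers(uint32_t marker_seq, uint32_t rcv_seq,
                                  uint32_t total_len)
    {
        uint32_t end_seq = rcv_seq + total_len;
        uint32_t nr = 0;

        while ((int32_t)(marker_seq - end_seq) < 0) {
            end_seq += MARKER_LEN;
            marker_seq += MARKER_SPACING;
            nr++;
        }
        return nr;
    }
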
/openbmc/linux/net/ipv6/
tcp_ipv6.c
  1533  if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&  in tcp_v6_do_rcv()
  1569  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +  in tcp_v6_fill_cb()

/openbmc/linux/net/sched/
sch_cake.c
  1085  u32 end_a = get_unaligned_be32(&sack_a->end_seq);  in cake_tcph_sack_compare()
  1097  u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);  in cake_tcph_sack_compare()

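sch_cake's ACK filter reads SACK block edges with get_unaligned_be32() because TCP option data is big-endian on the wire and carries no alignment guarantee. A hedged sketch of decoding one SACK block from raw option bytes, with a portable byte-assembly stand-in for the kernel helper (names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Portable stand-in for the kernel's get_unaligned_be32(). */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    struct sack_range {
        uint32_t start_seq;
        uint32_t end_seq;
    };

    /* Each SACK block in the TCP option is two 32-bit big-endian
       sequence numbers; 'idx' selects which block to decode. */
    static struct sack_range decode_sack_block(const uint8_t *opt_data, size_t idx)
    {
        const uint8_t *p = opt_data + idx * 8;
        struct sack_range r = {
            .start_seq = load_be32(p),
            .end_seq   = load_be32(p + 4),
        };
        return r;
    }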