Lines matching refs: tp (identifier cross-reference over the Linux TCP RACK loss detection code, net/ipv4/tcp_recovery.c)

In tcp_rack_reo_wnd() (tp is a local):

        const struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->reord_seen) {
                ...     /* without observed reordering, these checks return 0 */
                if (tp->sacked_out >= tp->reordering &&
                    ...
        }
        ...
        /* Reordering window: min_rtt/4 per step, capped at the smoothed RTT
         * (tp->srtt_us stores SRTT << 3, so >> 3 yields microseconds). */
        return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
                   tp->srtt_us >> 3);
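A minimal userspace sketch of that return expression, assuming the same scaling conventions (the names below are illustrative, not kernel API):

        #include <stdint.h>

        /* Reordering window in usec: min_rtt/4 per step, capped at one SRTT.
         * srtt_shifted mimics tp->srtt_us, which stores the smoothed RTT << 3. */
        static uint32_t rack_reo_wnd_sketch(uint32_t min_rtt_us,
                                            uint32_t srtt_shifted,
                                            uint32_t reo_wnd_steps)
        {
                uint32_t wnd = (min_rtt_us >> 2) * reo_wnd_steps;
                uint32_t srtt_us = srtt_shifted >> 3;

                return wnd < srtt_us ? wnd : srtt_us;
        }

For example, with a 20 ms minimum RTT, one window step, and a 48 ms SRTT, the window is min(5000, 48000) = 5000 usec.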
In tcp_rack_skb_timeout() (tp is an argument; the matched lines cover the whole body):

        s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
        {
                /* Time left before this skb is presumed lost: the latest RACK
                 * RTT plus the reordering window, minus its time in flight. */
                return tp->rack.rtt_us + reo_wnd -
                       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
        }
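For example, if tp->rack.rtt_us is 40000 (40 ms), the reordering window is 10000 (10 ms), and the skb was transmitted 60 ms ago, the result is 40000 + 10000 - 60000 = -10000: the skb is overdue, and the signed s32 return type lets the caller treat any non-positive value as lost.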
In tcp_rack_detect_loss() (tp is a local):

        struct tcp_sock *tp = tcp_sk(sk);
        ...
        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                ...
                /* Stop at the first skb not sent before the latest RACK
                 * sample; it and everything after cannot be judged yet. */
                if (!tcp_skb_sent_after(tp->rack.mstamp,
                                        tcp_skb_timestamp_us(skb),
                                        tp->rack.end_seq, scb->end_seq))
                        break;
                ...
                remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
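A simplified sketch of that scan over an array sorted oldest-first by transmit time, assuming the helper semantics above (illustrative types; the kernel also tie-breaks equal timestamps by sequence number):

        #include <stdint.h>

        struct sent_pkt {
                uint64_t xmit_us;       /* transmit timestamp */
                int lost;               /* set when marked lost */
        };

        /* Returns the longest remaining wait (usec) among packets that are
         * not yet overdue; 0 means no reorder timer needs to be armed. */
        static uint32_t rack_detect_loss_sketch(struct sent_pkt *pkts, int n,
                                                uint64_t now_us, uint64_t rack_mstamp,
                                                uint32_t rack_rtt_us, uint32_t reo_wnd_us)
        {
                uint32_t timeout = 0;

                for (int i = 0; i < n; i++) {
                        if (pkts[i].xmit_us >= rack_mstamp)
                                break;  /* sent after the latest RACK sample */
                        int64_t remaining = (int64_t)rack_rtt_us + reo_wnd_us -
                                            (int64_t)(now_us - pkts[i].xmit_us);
                        if (remaining <= 0)
                                pkts[i].lost = 1;       /* overdue: mark lost */
                        else if ((uint32_t)remaining > timeout)
                                timeout = (uint32_t)remaining;  /* longest wait */
                }
                return timeout;
        }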
In tcp_rack_mark_lost() (tp is a local):

        struct tcp_sock *tp = tcp_sk(sk);
        ...
        if (!tp->rack.advanced)
                return false;
        ...
        /* Reset the flag so the sorted queue is rescanned only after
         * tcp_rack_advance() records new delivery information. */
        tp->rack.advanced = 0;
In tcp_rack_advance() (tp is an argument):

        void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                              u64 xmit_time)
        {
                ...
                rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
                if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                        /* An implausibly low RTT on a retransmitted skb means
                         * the (s)ack is likely for the original: skip it. */
                        return;
                }
                ...
                tp->rack.advanced = 1;
                tp->rack.rtt_us = rtt_us;
                if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
                                       end_seq, tp->rack.end_seq)) {
                        tp->rack.mstamp = xmit_time;
                        tp->rack.end_seq = end_seq;
                }
        }
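The tcp_skb_sent_after() test keeps tp->rack.mstamp / tp->rack.end_seq pointing at the most recently transmitted packet known to be delivered. A standalone sketch of that update rule, with the sequence tie-break modeled via wrap-safe serial-number arithmetic (illustrative names, not kernel API):

        #include <stdint.h>

        struct rack_ref {
                uint64_t mstamp;        /* xmit time of latest delivered pkt */
                uint32_t end_seq;       /* its ending sequence number */
        };

        static void rack_advance_sketch(struct rack_ref *r,
                                        uint64_t xmit_us, uint32_t end_seq)
        {
                /* Later send time wins; equal send times fall back to the
                 * sequence comparison, like the kernel's after() macro. */
                if (xmit_us > r->mstamp ||
                    (xmit_us == r->mstamp && (int32_t)(end_seq - r->end_seq) > 0)) {
                        r->mstamp = xmit_us;
                        r->end_seq = end_seq;
                }
        }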
In tcp_rack_reo_timeout() (tp is a local):

        struct tcp_sock *tp = tcp_sk(sk);
        ...
        u32 lost = tp->lost;    /* snapshot the running loss counter */

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);     /* may mark packets lost */
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                ...
                tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
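Comparing packets-in-flight before and after the detection pass is how the timeout handler notices that losses were newly marked; the tp->lost - lost difference then feeds exactly that newly marked count into tcp_cwnd_reduction().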
In tcp_rack_update_reo_wnd() (tp is a local):

        struct tcp_sock *tp = tcp_sk(sk);
        ...
        /* Disregard a DSACK if an RTT has not passed since the last adjustment */
        if (before(rs->prior_delivered, tp->rack.last_delivered))
                tp->rack.dsack_seen = 0;

        /* Widen the window one step per DSACKed round trip */
        if (tp->rack.dsack_seen) {
                tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
                                               tp->rack.reo_wnd_steps + 1);
                tp->rack.dsack_seen = 0;
                tp->rack.last_delivered = tp->delivered;
                tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
        } else if (!tp->rack.reo_wnd_persist) {
                tp->rack.reo_wnd_steps = 1;     /* decay back to the default */
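A hedged sketch of this adaptive window: a DSACK (a sign the previous loss marking was premature) widens the reordering window by one step, capped at 255, and the wider window persists for a number of recoveries before decaying. Names and the threshold value below are illustrative; the persist counter is decremented per recovery episode elsewhere in the kernel:

        #include <stdint.h>

        #define RECOVERY_THRESH 16      /* stand-in for TCP_RACK_RECOVERY_THRESH */

        struct reo_state {
                uint8_t steps;          /* multiplier on min_rtt/4 */
                uint8_t persist;        /* recoveries left before decay */
        };

        static void reo_wnd_update_sketch(struct reo_state *s, int dsack_seen)
        {
                if (dsack_seen) {
                        if (s->steps < 0xFF)
                                s->steps++;     /* widen one step per DSACKed RTT */
                        s->persist = RECOVERY_THRESH;
                } else if (!s->persist) {
                        s->steps = 1;           /* decay back to the default */
                }
        }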
In tcp_newreno_mark_lost() (tp is a local):

        struct tcp_sock *tp = tcp_sk(sk);
        ...
        if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
            ...
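The visible half of that condition is the classic duplicate-ACK threshold: before recovery has begun, loss is declared once at least `reordering` segments have been SACKed. A minimal sketch of just that test (the condition continues past the matched line, so only this disjunct is modeled; names are illustrative):

        #include <stdbool.h>
        #include <stdint.h>

        /* NewReno-style trigger: not yet in recovery, and the number of
         * SACKed-out segments has reached the reordering threshold. */
        static bool newreno_dupthresh_hit(bool in_recovery,
                                          uint32_t sacked_out, uint32_t reordering)
        {
                return !in_recovery && sacked_out >= reordering;
        }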