xref: /openbmc/linux/net/dccp/ccids/ccid2.c (revision 1fa6ac37)
1 /*
2  *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
3  *
4  *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
5  *
6  *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, write to the Free Software
20  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 /*
24  * This implementation should follow RFC 4341
25  */
26 #include <linux/slab.h>
27 #include "../feat.h"
28 #include "../ccid.h"
29 #include "../dccp.h"
30 #include "ccid2.h"
31 
32 
33 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
34 static int ccid2_debug;
35 #define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
36 
37 static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
38 {
39 	int len = 0;
40 	int pipe = 0;
41 	struct ccid2_seq *seqp = hc->tx_seqh;
42 
43 	/* there is data in the chain */
44 	if (seqp != hc->tx_seqt) {
45 		seqp = seqp->ccid2s_prev;
46 		len++;
47 		if (!seqp->ccid2s_acked)
48 			pipe++;
49 
50 		while (seqp != hc->tx_seqt) {
51 			struct ccid2_seq *prev = seqp->ccid2s_prev;
52 
53 			len++;
54 			if (!prev->ccid2s_acked)
55 				pipe++;
56 
57 			/* packets are sent sequentially */
58 			BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
59 						prev->ccid2s_seq) >= 0);
60 			BUG_ON(time_before(seqp->ccid2s_sent,
61 					   prev->ccid2s_sent));
62 
63 			seqp = prev;
64 		}
65 	}
66 
67 	BUG_ON(pipe != hc->tx_pipe);
68 	ccid2_pr_debug("len of chain=%d\n", len);
69 
70 	do {
71 		seqp = seqp->ccid2s_prev;
72 		len++;
73 	} while (seqp != hc->tx_seqh);
74 
75 	ccid2_pr_debug("total len=%d\n", len);
76 	BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
77 }
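/*
 * The checks above enforce three ring invariants: tx_pipe must equal the
 * number of unacked entries between tx_seqt and tx_seqh, those entries must
 * be in strictly increasing sequence-number and non-decreasing send-time
 * order, and the total ring length must be tx_seqbufc * CCID2_SEQBUF_LEN.
 */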
78 #else
79 #define ccid2_pr_debug(format, a...)
80 #define ccid2_hc_tx_check_sanity(hc)
81 #endif
82 
83 static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
84 {
85 	struct ccid2_seq *seqp;
86 	int i;
87 
88 	/* check if we have space to preserve the pointer to the buffer */
89 	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
90 			       sizeof(struct ccid2_seq *)))
91 		return -ENOMEM;
92 
93 	/* allocate buffer and initialize linked list */
94 	seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
95 	if (seqp == NULL)
96 		return -ENOMEM;
97 
98 	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
99 		seqp[i].ccid2s_next = &seqp[i + 1];
100 		seqp[i + 1].ccid2s_prev = &seqp[i];
101 	}
102 	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
103 	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
104 
105 	/* This is the first allocation.  Initialise the head and tail.  */
106 	if (hc->tx_seqbufc == 0)
107 		hc->tx_seqh = hc->tx_seqt = seqp;
108 	else {
109 		/* link the existing list with the one we just created */
110 		hc->tx_seqh->ccid2s_next = seqp;
111 		seqp->ccid2s_prev = hc->tx_seqh;
112 
113 		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
114 		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
115 	}
116 
117 	/* store the original pointer to the buffer so we can free it */
118 	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
119 	hc->tx_seqbufc++;
120 
121 	return 0;
122 }
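/*
 * The packet history is kept as a circular list that grows in chunks of
 * CCID2_SEQBUF_LEN entries: each successful call above splices one freshly
 * allocated chunk into the ring between tx_seqh and tx_seqt and records its
 * base pointer in tx_seqbuf[], so that ccid2_hc_tx_exit() can free the chunks
 * later. Once all tx_seqbuf[] slots are used, further growth is refused with
 * -ENOMEM.
 */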
123 
124 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
125 {
126 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
127 
128 	if (hc->tx_pipe < hc->tx_cwnd)
129 		return 0;
130 
131 	return 1; /* XXX CCID should dequeue when ready instead of polling */
132 }
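/*
 * The hook above implements window-based admission: a new packet may be sent
 * (return 0) only while fewer than tx_cwnd packets are in flight (tx_pipe);
 * otherwise the non-zero return tells the caller that the CCID is not yet
 * ready and the packet is retried later (see the XXX note above).
 */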
133 
134 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
135 {
136 	struct dccp_sock *dp = dccp_sk(sk);
137 	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
138 
139 	/*
140 	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
141 	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
142 	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
143 	 * The same problem arises when Ack Ratio is 0 (i.e. Ack Ratio disabled).
144 	 */
145 	if (val == 0 || val > max_ratio) {
146 		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
147 		val = max_ratio;
148 	}
149 	if (val > DCCPF_ACK_RATIO_MAX)
150 		val = DCCPF_ACK_RATIO_MAX;
151 
152 	if (val == dp->dccps_l_ack_ratio)
153 		return;
154 
155 	ccid2_pr_debug("changing local ack ratio to %u\n", val);
156 	dp->dccps_l_ack_ratio = val;
157 }
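/*
 * Example of the bound above, assuming a hypothetical caller:
 *
 *	ccid2_change_l_ack_ratio(sk, 9);	with tx_cwnd == 5
 *
 * clips the request to ceil(5/2) = 3, so the peer acknowledges at least every
 * third data packet and at least one Ack arrives per window. A request of 0
 * is treated the same way, and the result is further limited to
 * DCCPF_ACK_RATIO_MAX.
 */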
158 
159 static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
160 {
161 	ccid2_pr_debug("change SRTT to %ld\n", val);
162 	hc->tx_srtt = val;
163 }
164 
165 static void ccid2_start_rto_timer(struct sock *sk);
166 
167 static void ccid2_hc_tx_rto_expire(unsigned long data)
168 {
169 	struct sock *sk = (struct sock *)data;
170 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
171 	long s;
172 
173 	bh_lock_sock(sk);
174 	if (sock_owned_by_user(sk)) {
175 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
176 		goto out;
177 	}
178 
179 	ccid2_pr_debug("RTO_EXPIRE\n");
180 
181 	ccid2_hc_tx_check_sanity(hc);
182 
183 	/* back-off timer */
184 	hc->tx_rto <<= 1;
185 
186 	s = hc->tx_rto / HZ;
187 	if (s > 60)
188 		hc->tx_rto = 60 * HZ;
189 
190 	ccid2_start_rto_timer(sk);
191 
192 	/* adjust pipe, cwnd etc */
193 	hc->tx_ssthresh = hc->tx_cwnd / 2;
194 	if (hc->tx_ssthresh < 2)
195 		hc->tx_ssthresh = 2;
196 	hc->tx_cwnd	 = 1;
197 	hc->tx_pipe	 = 0;
198 
199 	/* clear state about stuff we sent */
200 	hc->tx_seqt = hc->tx_seqh;
201 	hc->tx_packets_acked = 0;
202 
203 	/* clear ack ratio state. */
204 	hc->tx_rpseq    = 0;
205 	hc->tx_rpdupack = -1;
206 	ccid2_change_l_ack_ratio(sk, 1);
207 	ccid2_hc_tx_check_sanity(hc);
208 out:
209 	bh_unlock_sock(sk);
210 	sock_put(sk);
211 }
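/*
 * Timeout handling above mirrors a TCP-style retransmission timeout: the RTO
 * is doubled (capped at 60 seconds), ssthresh becomes half the old cwnd but
 * at least 2, cwnd collapses to 1 and pipe to 0, and the packet history is
 * flushed so that recovery restarts in slow-start. Ack Ratio is reset to 1 so
 * that the peer acknowledges every packet and feedback resumes quickly.
 */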
212 
213 static void ccid2_start_rto_timer(struct sock *sk)
214 {
215 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
216 
217 	ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
218 
219 	BUG_ON(timer_pending(&hc->tx_rtotimer));
220 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
221 }
222 
223 static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
224 {
225 	struct dccp_sock *dp = dccp_sk(sk);
226 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
227 	struct ccid2_seq *next;
228 
229 	hc->tx_pipe++;
230 
231 	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
232 	hc->tx_seqh->ccid2s_acked = 0;
233 	hc->tx_seqh->ccid2s_sent  = jiffies;
234 
235 	next = hc->tx_seqh->ccid2s_next;
236 	/* check if we need to alloc more space */
237 	if (next == hc->tx_seqt) {
238 		if (ccid2_hc_tx_alloc_seq(hc)) {
239 			DCCP_CRIT("packet history - out of memory!");
240 			/* FIXME: find a more graceful way to bail out */
241 			return;
242 		}
243 		next = hc->tx_seqh->ccid2s_next;
244 		BUG_ON(next == hc->tx_seqt);
245 	}
246 	hc->tx_seqh = next;
247 
248 	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);
249 
250 	/*
251 	 * FIXME: The code below is broken and the variables have been removed
252 	 * from the socket struct. The `ackloss' variable was always set to 0,
253 	 * and with arsent there are several problems:
254 	 *  (i) it doesn't just count the number of Acks, but all sent packets;
255 	 *  (ii) it is expressed in # of packets, not # of windows, so the
256 	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
257 	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
258 	 *  of data with no lost or marked Ack packets. If arsent were the # of
259 	 *  consecutive Acks received without loss, then Ack Ratio needs to be
260 	 *  decreased by 1 when
261 	 *	      arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
262 	 *  where cwnd / R is the number of Acks received per window of data
263 	 *  (cf. RFC 4341, App. A). The problems are that
264 	 *  - arsent counts other packets as well;
265 	 *  - the comparison uses a formula different from RFC 4341;
266 	 *  - computing a cubic/quadratic equation each time is too complicated.
267 	 *  Hence a different algorithm is needed.
268 	 */
269 #if 0
270 	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
271 	hc->tx_arsent++;
272 	/* We had an ack loss in this window... */
273 	if (hc->tx_ackloss) {
274 		if (hc->tx_arsent >= hc->tx_cwnd) {
275 			hc->tx_arsent  = 0;
276 			hc->tx_ackloss = 0;
277 		}
278 	} else {
279 		/* No acks lost up to now... */
280 		/* decrease ack ratio if enough packets were sent */
281 		if (dp->dccps_l_ack_ratio > 1) {
282 			/* XXX don't calculate denominator each time */
283 			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
284 				    dp->dccps_l_ack_ratio;
285 
286 			denom = hc->tx_cwnd * hc->tx_cwnd / denom;
287 
288 			if (hc->tx_arsent >= denom) {
289 				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
290 				hc->tx_arsent = 0;
291 			}
292 		} else {
293 			/* we can't increase ack ratio further [1] */
294 			hc->tx_arsent = 0; /* or maybe set it to cwnd*/
295 		}
296 	}
297 #endif
298 
299 	/* setup RTO timer */
300 	if (!timer_pending(&hc->tx_rtotimer))
301 		ccid2_start_rto_timer(sk);
302 
303 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
304 	do {
305 		struct ccid2_seq *seqp = hc->tx_seqt;
306 
307 		while (seqp != hc->tx_seqh) {
308 			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
309 				       (unsigned long long)seqp->ccid2s_seq,
310 				       seqp->ccid2s_acked, seqp->ccid2s_sent);
311 			seqp = seqp->ccid2s_next;
312 		}
313 	} while (0);
314 	ccid2_pr_debug("=========\n");
315 	ccid2_hc_tx_check_sanity(hc);
316 #endif
317 }
318 
319 /* XXX Lame code duplication!
320  * returns -1 if none was found.
321  * else returns the next offset to use in the function call.
322  */
323 static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
324 			   unsigned char **vec, unsigned char *veclen)
325 {
326 	const struct dccp_hdr *dh = dccp_hdr(skb);
327 	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
328 	unsigned char *opt_ptr;
329 	const unsigned char *opt_end = (unsigned char *)dh +
330 					(dh->dccph_doff * 4);
331 	unsigned char opt, len;
332 	unsigned char *value;
333 
334 	BUG_ON(offset < 0);
335 	options += offset;
336 	opt_ptr = options;
337 	if (opt_ptr >= opt_end)
338 		return -1;
339 
340 	while (opt_ptr != opt_end) {
341 		opt   = *opt_ptr++;
342 		len   = 0;
343 		value = NULL;
344 
345 		/* Check if this isn't a single byte option */
346 		if (opt > DCCPO_MAX_RESERVED) {
347 			if (opt_ptr == opt_end)
348 				goto out_invalid_option;
349 
350 			len = *opt_ptr++;
351 			if (len < 3)
352 				goto out_invalid_option;
353 			/*
354 			 * Remove the type and len fields, leaving
355 			 * just the value size
356 			 */
357 			len     -= 2;
358 			value   = opt_ptr;
359 			opt_ptr += len;
360 
361 			if (opt_ptr > opt_end)
362 				goto out_invalid_option;
363 		}
364 
365 		switch (opt) {
366 		case DCCPO_ACK_VECTOR_0:
367 		case DCCPO_ACK_VECTOR_1:
368 			*vec	= value;
369 			*veclen = len;
370 			return offset + (opt_ptr - options);
371 		}
372 	}
373 
374 	return -1;
375 
376 out_invalid_option:
377 	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
378 	return -1;
379 }
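/*
 * ccid2_ackvector() is a minimal option walker: starting at @offset into the
 * DCCP option area it scans for the next Ack Vector option (DCCPO_ACK_VECTOR_0
 * or DCCPO_ACK_VECTOR_1), returns the value and its length through
 * @vec/@veclen, and returns the offset at which to resume scanning, or -1 once
 * no further Ack Vector is present. Malformed options should not occur here
 * because the packet has already passed option parsing.
 */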
380 
381 static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
382 {
383 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
384 
385 	sk_stop_timer(sk, &hc->tx_rtotimer);
386 	ccid2_pr_debug("deleted RTO timer\n");
387 }
388 
389 static inline void ccid2_new_ack(struct sock *sk,
390 				 struct ccid2_seq *seqp,
391 				 unsigned int *maxincr)
392 {
393 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
394 
395 	if (hc->tx_cwnd < hc->tx_ssthresh) {
396 		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
397 			hc->tx_cwnd += 1;
398 			*maxincr    -= 1;
399 			hc->tx_packets_acked = 0;
400 		}
401 	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
402 		hc->tx_cwnd += 1;
403 		hc->tx_packets_acked = 0;
404 	}
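	/*
	 * Window growth above follows RFC 4341: in slow-start cwnd grows by
	 * one packet per two newly acked packets (at most *maxincr increments
	 * per incoming Ack); in congestion avoidance it grows by one packet
	 * per window's worth (tx_cwnd) of acked packets.
	 */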
405 
406 	/* update RTO */
407 	if (hc->tx_srtt == -1 ||
408 	    time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
409 		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
410 		int s;
411 
412 		/* first measurement */
413 		if (hc->tx_srtt == -1) {
414 			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
415 				       r, jiffies,
416 				       (unsigned long long)seqp->ccid2s_seq);
417 			ccid2_change_srtt(hc, r);
418 			hc->tx_rttvar = r >> 1;
419 		} else {
420 			/* RTTVAR */
421 			long tmp = hc->tx_srtt - r;
422 			long srtt;
423 
424 			if (tmp < 0)
425 				tmp *= -1;
426 
427 			tmp >>= 2;
428 			hc->tx_rttvar *= 3;
429 			hc->tx_rttvar >>= 2;
430 			hc->tx_rttvar += tmp;
431 
432 			/* SRTT */
433 			srtt = hc->tx_srtt;
434 			srtt *= 7;
435 			srtt >>= 3;
436 			tmp = r >> 3;
437 			srtt += tmp;
438 			ccid2_change_srtt(hc, srtt);
439 		}
440 		s = hc->tx_rttvar << 2;
441 		/* clock granularity is 1 when based on jiffies */
442 		if (!s)
443 			s = 1;
444 		hc->tx_rto = hc->tx_srtt + s;
445 
446 		/* must be at least a second */
447 		s = hc->tx_rto / HZ;
448 		/* DCCP doesn't require this lower bound, but apply it as a safeguard */
449 #if 1
450 		if (s < 1)
451 			hc->tx_rto = HZ;
452 #endif
453 		/* max 60 seconds */
454 		if (s > 60)
455 			hc->tx_rto = HZ * 60;
456 
457 		hc->tx_lastrtt = jiffies;
458 
459 		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
460 			       hc->tx_srtt, hc->tx_rttvar,
461 			       hc->tx_rto, HZ, r);
462 	}
463 
464 	/* we got a new ack, so re-start RTO timer */
465 	ccid2_hc_tx_kill_rto_timer(sk);
466 	ccid2_start_rto_timer(sk);
467 }
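/*
 * The RTT estimator above is, in effect, the classic RFC 2988-style pair of
 * EWMAs, computed at most once per SRTT and using jiffies as the clock:
 *
 *	RTTVAR <- 3/4 * RTTVAR + 1/4 * |SRTT - R|
 *	SRTT   <- 7/8 * SRTT   + 1/8 * R
 *	RTO    <- SRTT + max(4 * RTTVAR, 1 jiffy)
 *
 * where R is the raw sample (jiffies now minus ccid2s_sent of the newly acked
 * packet), and the resulting RTO is kept between 1 and 60 seconds. The first
 * sample initialises SRTT = R and RTTVAR = R/2.
 */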
468 
469 static void ccid2_hc_tx_dec_pipe(struct sock *sk)
470 {
471 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
472 
473 	if (hc->tx_pipe == 0)
474 		DCCP_BUG("pipe == 0");
475 	else
476 		hc->tx_pipe--;
477 
478 	if (hc->tx_pipe == 0)
479 		ccid2_hc_tx_kill_rto_timer(sk);
480 }
481 
482 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
483 {
484 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
485 
486 	if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
487 		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
488 		return;
489 	}
490 
491 	hc->tx_last_cong = jiffies;
492 
493 	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
494 	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
495 
496 	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
497 	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
498 		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
499 }
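/*
 * This is the multiplicative-decrease half of the controller: cwnd is halved
 * (never below 1), ssthresh becomes max(cwnd, 2), and tx_last_cong timestamps
 * the reaction so that losses of packets sent before it are treated as part
 * of the same congestion event. Example: cwnd 10 becomes 5, ssthresh 5.
 */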
500 
501 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
502 {
503 	struct dccp_sock *dp = dccp_sk(sk);
504 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
505 	u64 ackno, seqno;
506 	struct ccid2_seq *seqp;
507 	unsigned char *vector;
508 	unsigned char veclen;
509 	int offset = 0;
510 	int done = 0;
511 	unsigned int maxincr = 0;
512 
513 	ccid2_hc_tx_check_sanity(hc);
514 	/* check reverse path congestion */
515 	seqno = DCCP_SKB_CB(skb)->dccpd_seq;
516 
517 	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
518 	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
519 	 * -sorbo.
520 	 */
521 	/* need to bootstrap */
522 	if (hc->tx_rpdupack == -1) {
523 		hc->tx_rpdupack = 0;
524 		hc->tx_rpseq    = seqno;
525 	} else {
526 		/* check if packet is consecutive */
527 		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
528 			hc->tx_rpseq = seqno;
529 		/* it's a later packet */
530 		else if (after48(seqno, hc->tx_rpseq)) {
531 			hc->tx_rpdupack++;
532 
533 			/* check if we got enough dupacks */
534 			if (hc->tx_rpdupack >= NUMDUPACK) {
535 				hc->tx_rpdupack = -1; /* XXX lame */
536 				hc->tx_rpseq    = 0;
537 
538 				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
539 			}
540 		}
541 	}
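	/*
	 * The heuristic above watches the sender's own receive path: when
	 * packets from the peer keep arriving with sequence numbers beyond an
	 * unfilled gap, traffic on the reverse path (which carries the peer's
	 * Acks) is presumed lossy, and after NUMDUPACK such arrivals the Ack
	 * Ratio is doubled so that the peer acknowledges less often.
	 */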
542 
543 	/* check forward path congestion */
544 	/* still didn't send out new data packets */
545 	if (hc->tx_seqh == hc->tx_seqt)
546 		return;
547 
548 	switch (DCCP_SKB_CB(skb)->dccpd_type) {
549 	case DCCP_PKT_ACK:
550 	case DCCP_PKT_DATAACK:
551 		break;
552 	default:
553 		return;
554 	}
555 
556 	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
557 	if (after48(ackno, hc->tx_high_ack))
558 		hc->tx_high_ack = ackno;
559 
560 	seqp = hc->tx_seqt;
561 	while (before48(seqp->ccid2s_seq, ackno)) {
562 		seqp = seqp->ccid2s_next;
563 		if (seqp == hc->tx_seqh) {
564 			seqp = hc->tx_seqh->ccid2s_prev;
565 			break;
566 		}
567 	}
568 
569 	/*
570 	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
571 	 * packets per acknowledgement. Rounding up ensures that cwnd still
572 	 * advances when Ack Ratio is 1, and gives a slight edge otherwise.
573 	 */
574 	if (hc->tx_cwnd < hc->tx_ssthresh)
575 		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
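	/* E.g. with Ack Ratio 2 an Ack may newly cover two data packets, and
	 * maxincr = 1 then allows at most one cwnd increment for that Ack,
	 * preserving the "one increment per two acked packets" slow-start rule.
	 */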
576 
577 	/* go through all ack vectors */
578 	while ((offset = ccid2_ackvector(sk, skb, offset,
579 					 &vector, &veclen)) != -1) {
580 		/* go through this ack vector */
581 		while (veclen--) {
582 			const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
583 			u64 ackno_end_rl = SUB48(ackno, rl);
584 
585 			ccid2_pr_debug("ackvec start:%llu end:%llu\n",
586 				       (unsigned long long)ackno,
587 				       (unsigned long long)ackno_end_rl);
588 			/* if the seqno we are analyzing is larger than the
589 			 * current ackno, then move towards the tail of our
590 			 * seqnos.
591 			 */
592 			while (after48(seqp->ccid2s_seq, ackno)) {
593 				if (seqp == hc->tx_seqt) {
594 					done = 1;
595 					break;
596 				}
597 				seqp = seqp->ccid2s_prev;
598 			}
599 			if (done)
600 				break;
601 
602 			/* check all seqnos in the range of the vector
603 			 * run length
604 			 */
605 			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
606 				const u8 state = *vector &
607 						 DCCP_ACKVEC_STATE_MASK;
608 
609 				/* new packet received or marked */
610 				if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
611 				    !seqp->ccid2s_acked) {
612 					if (state ==
613 					    DCCP_ACKVEC_STATE_ECN_MARKED) {
614 						ccid2_congestion_event(sk,
615 								       seqp);
616 					} else
617 						ccid2_new_ack(sk, seqp,
618 							      &maxincr);
619 
620 					seqp->ccid2s_acked = 1;
621 					ccid2_pr_debug("Got ack for %llu\n",
622 						       (unsigned long long)seqp->ccid2s_seq);
623 					ccid2_hc_tx_dec_pipe(sk);
624 				}
625 				if (seqp == hc->tx_seqt) {
626 					done = 1;
627 					break;
628 				}
629 				seqp = seqp->ccid2s_prev;
630 			}
631 			if (done)
632 				break;
633 
634 			ackno = SUB48(ackno_end_rl, 1);
635 			vector++;
636 		}
637 		if (done)
638 			break;
639 	}
640 
641 	/* The state about what is acked should be correct now
642 	 * Check for NUMDUPACK
643 	 */
644 	seqp = hc->tx_seqt;
645 	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
646 		seqp = seqp->ccid2s_next;
647 		if (seqp == hc->tx_seqh) {
648 			seqp = hc->tx_seqh->ccid2s_prev;
649 			break;
650 		}
651 	}
652 	done = 0;
653 	while (1) {
654 		if (seqp->ccid2s_acked) {
655 			done++;
656 			if (done == NUMDUPACK)
657 				break;
658 		}
659 		if (seqp == hc->tx_seqt)
660 			break;
661 		seqp = seqp->ccid2s_prev;
662 	}
663 
664 	/* If there are at least NUMDUPACK (3) acknowledgements, anything still
665 	 * unacknowledged below the oldest of them is considered lost
666 	 */
667 	if (done == NUMDUPACK) {
668 		struct ccid2_seq *last_acked = seqp;
669 
670 		/* check for lost packets */
671 		while (1) {
672 			if (!seqp->ccid2s_acked) {
673 				ccid2_pr_debug("Packet lost: %llu\n",
674 					       (unsigned long long)seqp->ccid2s_seq);
675 				/* XXX need to traverse from tail -> head in
676 				 * order to detect multiple congestion events in
677 				 * one ack vector.
678 				 */
679 				ccid2_congestion_event(sk, seqp);
680 				ccid2_hc_tx_dec_pipe(sk);
681 			}
682 			if (seqp == hc->tx_seqt)
683 				break;
684 			seqp = seqp->ccid2s_prev;
685 		}
686 
687 		hc->tx_seqt = last_acked;
688 	}
689 
690 	/* trim acked packets in tail */
691 	while (hc->tx_seqt != hc->tx_seqh) {
692 		if (!hc->tx_seqt->ccid2s_acked)
693 			break;
694 
695 		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
696 	}
697 
698 	ccid2_hc_tx_check_sanity(hc);
699 }
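/*
 * Forward-path loss detection above is the TCP "three duplicate ACKs" idea
 * restated in Ack Vector terms: once NUMDUPACK (3) packets at or above a hole
 * have been acknowledged, every still-unacknowledged packet older than the
 * oldest of those acknowledgements is declared lost, each loss feeds
 * ccid2_congestion_event() and ccid2_hc_tx_dec_pipe(), and the tail of the
 * history is then advanced past the fully acknowledged packets.
 */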
700 
701 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
702 {
703 	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
704 	struct dccp_sock *dp = dccp_sk(sk);
705 	u32 max_ratio;
706 
707 	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
708 	hc->tx_ssthresh = ~0U;
709 
710 	/*
711 	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
712 	 * packets for new connections, following the rules from [RFC3390]".
713 	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
714 	 */
715 	hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
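	/*
	 * E.g. for an Ethernet-sized MSS of about 1460 bytes this yields
	 * 4380/1460 = 3 packets; small MSS values are capped at 4 packets and
	 * large ones (above 2190 bytes) still get at least 2.
	 */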
716 
717 	/* Make sure that Ack Ratio is enabled and within bounds. */
718 	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
719 	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
720 		dp->dccps_l_ack_ratio = max_ratio;
721 
722 	/* XXX init ~ to window size... */
723 	if (ccid2_hc_tx_alloc_seq(hc))
724 		return -ENOMEM;
725 
726 	hc->tx_rto	 = 3 * HZ;
727 	ccid2_change_srtt(hc, -1);
728 	hc->tx_rttvar    = -1;
729 	hc->tx_rpdupack  = -1;
730 	hc->tx_last_cong = jiffies;
731 	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
732 			(unsigned long)sk);
733 
734 	ccid2_hc_tx_check_sanity(hc);
735 	return 0;
736 }
737 
738 static void ccid2_hc_tx_exit(struct sock *sk)
739 {
740 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
741 	int i;
742 
743 	ccid2_hc_tx_kill_rto_timer(sk);
744 
745 	for (i = 0; i < hc->tx_seqbufc; i++)
746 		kfree(hc->tx_seqbuf[i]);
747 	hc->tx_seqbufc = 0;
748 }
749 
750 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
751 {
752 	const struct dccp_sock *dp = dccp_sk(sk);
753 	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
754 
755 	switch (DCCP_SKB_CB(skb)->dccpd_type) {
756 	case DCCP_PKT_DATA:
757 	case DCCP_PKT_DATAACK:
758 		hc->rx_data++;
759 		if (hc->rx_data >= dp->dccps_r_ack_ratio) {
760 			dccp_send_ack(sk);
761 			hc->rx_data = 0;
762 		}
763 		break;
764 	}
765 }
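/*
 * The receiver half of CCID-2 is deliberately simple: it counts data-bearing
 * packets and sends an Ack once dccps_r_ack_ratio of them have arrived, i.e.
 * it implements exactly the Ack Ratio pacing that the sender side negotiates
 * through ccid2_change_l_ack_ratio() at the other end.
 */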
766 
767 struct ccid_operations ccid2_ops = {
768 	.ccid_id		= DCCPC_CCID2,
769 	.ccid_name		= "TCP-like",
770 	.ccid_hc_tx_obj_size	= sizeof(struct ccid2_hc_tx_sock),
771 	.ccid_hc_tx_init	= ccid2_hc_tx_init,
772 	.ccid_hc_tx_exit	= ccid2_hc_tx_exit,
773 	.ccid_hc_tx_send_packet	= ccid2_hc_tx_send_packet,
774 	.ccid_hc_tx_packet_sent	= ccid2_hc_tx_packet_sent,
775 	.ccid_hc_tx_packet_recv	= ccid2_hc_tx_packet_recv,
776 	.ccid_hc_rx_obj_size	= sizeof(struct ccid2_hc_rx_sock),
777 	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
778 };
779 
780 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
781 module_param(ccid2_debug, bool, 0644);
782 MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
783 #endif
784