xref: /openbmc/linux/net/sctp/outqueue.c (revision 861e10be)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001-2003 Intel Corp.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions implement the sctp_outq class.   The outqueue handles
10  * bundling and queueing of outgoing SCTP chunks.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, write to
26  * the Free Software Foundation, 59 Temple Place - Suite 330,
27  * Boston, MA 02111-1307, USA.
28  *
29  * Please send any bug reports or fixes you make to the
30  * email address(es):
31  *    lksctp developers <lksctp-developers@lists.sourceforge.net>
32  *
33  * Or submit a bug report through the following website:
34  *    http://www.sf.net/projects/lksctp
35  *
36  * Written or modified by:
37  *    La Monte H.P. Yarroll <piggy@acm.org>
38  *    Karl Knutson          <karl@athena.chicago.il.us>
39  *    Perry Melange         <pmelange@null.cc.uic.edu>
40  *    Xingang Guo           <xingang.guo@intel.com>
41  *    Hui Huang             <hui.huang@nokia.com>
42  *    Sridhar Samudrala     <sri@us.ibm.com>
43  *    Jon Grimm             <jgrimm@us.ibm.com>
44  *
45  * Any bugs reported given to us we will try to fix... any fixes shared will
46  * be incorporated into the next SCTP release.
47  */
48 
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 
51 #include <linux/types.h>
52 #include <linux/list.h>   /* For struct list_head */
53 #include <linux/socket.h>
54 #include <linux/ip.h>
55 #include <linux/slab.h>
56 #include <net/sock.h>	  /* For skb_set_owner_w */
57 
58 #include <net/sctp/sctp.h>
59 #include <net/sctp/sm.h>
60 
61 /* Declare internal functions here.  */
62 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
63 static void sctp_check_transmitted(struct sctp_outq *q,
64 				   struct list_head *transmitted_queue,
65 				   struct sctp_transport *transport,
66 				   union sctp_addr *saddr,
67 				   struct sctp_sackhdr *sack,
68 				   __u32 *highest_new_tsn);
69 
70 static void sctp_mark_missing(struct sctp_outq *q,
71 			      struct list_head *transmitted_queue,
72 			      struct sctp_transport *transport,
73 			      __u32 highest_new_tsn,
74 			      int count_of_newacks);
75 
76 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
77 
78 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
79 
80 /* Add data to the front of the queue. */
81 static inline void sctp_outq_head_data(struct sctp_outq *q,
82 					struct sctp_chunk *ch)
83 {
84 	list_add(&ch->list, &q->out_chunk_list);
85 	q->out_qlen += ch->skb->len;
86 }
87 
88 /* Take data from the front of the queue. */
89 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
90 {
91 	struct sctp_chunk *ch = NULL;
92 
93 	if (!list_empty(&q->out_chunk_list)) {
94 		struct list_head *entry = q->out_chunk_list.next;
95 
96 		ch = list_entry(entry, struct sctp_chunk, list);
97 		list_del_init(entry);
98 		q->out_qlen -= ch->skb->len;
99 	}
100 	return ch;
101 }
102 /* Add data chunk to the end of the queue. */
103 static inline void sctp_outq_tail_data(struct sctp_outq *q,
104 				       struct sctp_chunk *ch)
105 {
106 	list_add_tail(&ch->list, &q->out_chunk_list);
107 	q->out_qlen += ch->skb->len;
108 }
109 
110 /*
111  * SFR-CACC algorithm:
112  * D) If count_of_newacks is greater than or equal to 2
113  * and t was not sent to the current primary then the
114  * sender MUST NOT increment missing report count for t.
115  */
116 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
117 				       struct sctp_transport *transport,
118 				       int count_of_newacks)
119 {
120 	if (count_of_newacks >= 2 && transport != primary)
121 		return 1;
122 	return 0;
123 }
124 
125 /*
126  * SFR-CACC algorithm:
127  * F) If count_of_newacks is less than 2, let d be the
128  * destination to which t was sent. If cacc_saw_newack
129  * is 0 for destination d, then the sender MUST NOT
130  * increment missing report count for t.
131  */
132 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
133 				       int count_of_newacks)
134 {
135 	if (count_of_newacks < 2 &&
136 	    (transport && !transport->cacc.cacc_saw_newack))
137 		return 1;
138 	return 0;
139 }
140 
141 /*
142  * SFR-CACC algorithm:
143  * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
144  * execute steps C, D, F.
145  *
146  * C has been implemented in sctp_outq_sack
147  */
148 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
149 				     struct sctp_transport *transport,
150 				     int count_of_newacks)
151 {
152 	if (!primary->cacc.cycling_changeover) {
153 		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
154 			return 1;
155 		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
156 			return 1;
157 		return 0;
158 	}
159 	return 0;
160 }
161 
162 /*
163  * SFR-CACC algorithm:
164  * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
165  * than next_tsn_at_change of the current primary, then
166  * the sender MUST NOT increment missing report count
167  * for t.
168  */
169 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
170 {
171 	if (primary->cacc.cycling_changeover &&
172 	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
173 		return 1;
174 	return 0;
175 }
176 
177 /*
178  * SFR-CACC algorithm:
179  * 3) If the missing report count for TSN t is to be
180  * incremented according to [RFC2960] and
181  * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
182  * then the sender MUST further execute steps 3.1 and
183  * 3.2 to determine if the missing report count for
184  * TSN t SHOULD NOT be incremented.
185  *
186  * 3.3) If 3.1 and 3.2 do not dictate that the missing
187  * report count for t should not be incremented, then
188  * the sender SHOULD increment missing report count for
189  * t (according to [RFC2960] and [SCTP_STEWART_2002]).
190  */
191 static inline int sctp_cacc_skip(struct sctp_transport *primary,
192 				 struct sctp_transport *transport,
193 				 int count_of_newacks,
194 				 __u32 tsn)
195 {
196 	if (primary->cacc.changeover_active &&
197 	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
198 	     sctp_cacc_skip_3_2(primary, tsn)))
199 		return 1;
200 	return 0;
201 }
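
/* A worked example of the SFR-CACC skip logic above (a sketch; the
 * concrete numbers are illustrative, not from any trace): suppose a
 * changeover is active (primary->cacc.changeover_active == 1) and
 * CYCLING_CHANGEOVER is 0.  A SACK newly acks data on two destinations,
 * so count_of_newacks == 2.  For a TSN t that was sent to a transport
 * other than the current primary, step D applies: sctp_cacc_skip_3_1_d()
 * returns 1, so sctp_cacc_skip() returns 1 and the caller must not
 * increment t's missing report count.
 */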
202 
203 /* Initialize an existing sctp_outq.  This does the boring stuff.
204  * You still need to define handlers if you really want to DO
205  * something with this structure...
206  */
207 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 {
209 	q->asoc = asoc;
210 	INIT_LIST_HEAD(&q->out_chunk_list);
211 	INIT_LIST_HEAD(&q->control_chunk_list);
212 	INIT_LIST_HEAD(&q->retransmit);
213 	INIT_LIST_HEAD(&q->sacked);
214 	INIT_LIST_HEAD(&q->abandoned);
215 
216 	q->fast_rtx = 0;
217 	q->outstanding_bytes = 0;
218 	q->empty = 1;
219 	q->cork  = 0;
220 
221 	q->malloced = 0;
222 	q->out_qlen = 0;
223 }
224 
225 /* Free the outqueue structure and any related pending chunks.
226  */
227 static void __sctp_outq_teardown(struct sctp_outq *q)
228 {
229 	struct sctp_transport *transport;
230 	struct list_head *lchunk, *temp;
231 	struct sctp_chunk *chunk, *tmp;
232 
233 	/* Throw away unacknowledged chunks. */
234 	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
235 			transports) {
236 		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
237 			chunk = list_entry(lchunk, struct sctp_chunk,
238 					   transmitted_list);
239 			/* Mark as part of a failed message. */
240 			sctp_chunk_fail(chunk, q->error);
241 			sctp_chunk_free(chunk);
242 		}
243 	}
244 
245 	/* Throw away chunks that have been gap ACKed.  */
246 	list_for_each_safe(lchunk, temp, &q->sacked) {
247 		list_del_init(lchunk);
248 		chunk = list_entry(lchunk, struct sctp_chunk,
249 				   transmitted_list);
250 		sctp_chunk_fail(chunk, q->error);
251 		sctp_chunk_free(chunk);
252 	}
253 
254 	/* Throw away any chunks in the retransmit queue. */
255 	list_for_each_safe(lchunk, temp, &q->retransmit) {
256 		list_del_init(lchunk);
257 		chunk = list_entry(lchunk, struct sctp_chunk,
258 				   transmitted_list);
259 		sctp_chunk_fail(chunk, q->error);
260 		sctp_chunk_free(chunk);
261 	}
262 
263 	/* Throw away any chunks that are in the abandoned queue. */
264 	list_for_each_safe(lchunk, temp, &q->abandoned) {
265 		list_del_init(lchunk);
266 		chunk = list_entry(lchunk, struct sctp_chunk,
267 				   transmitted_list);
268 		sctp_chunk_fail(chunk, q->error);
269 		sctp_chunk_free(chunk);
270 	}
271 
272 	/* Throw away any leftover data chunks. */
273 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
274 
275 		/* Mark as send failure. */
276 		sctp_chunk_fail(chunk, q->error);
277 		sctp_chunk_free(chunk);
278 	}
279 
280 	/* Throw away any leftover control chunks. */
281 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
282 		list_del_init(&chunk->list);
283 		sctp_chunk_free(chunk);
284 	}
285 }
286 
287 void sctp_outq_teardown(struct sctp_outq *q)
288 {
289 	__sctp_outq_teardown(q);
290 	sctp_outq_init(q->asoc, q);
291 }
292 
293 /* Free the outqueue structure and any related pending chunks.  */
294 void sctp_outq_free(struct sctp_outq *q)
295 {
296 	/* Throw away leftover chunks. */
297 	__sctp_outq_teardown(q);
298 
299 	/* If we were kmalloc()'d, free the memory.  */
300 	if (q->malloced)
301 		kfree(q);
302 }
303 
304 /* Put a new chunk in an sctp_outq.  */
305 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
306 {
307 	struct net *net = sock_net(q->asoc->base.sk);
308 	int error = 0;
309 
310 	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
311 			  q, chunk, chunk && chunk->chunk_hdr ?
312 			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
313 			  : "Illegal Chunk");
314 
315 	/* If it is data, queue it up; otherwise, send it
316 	 * immediately.
317 	 */
318 	if (sctp_chunk_is_data(chunk)) {
319 		/* Is it OK to queue data chunks?  */
320 		/* From 9. Termination of Association
321 		 *
322 		 * When either endpoint performs a shutdown, the
323 		 * association on each peer will stop accepting new
324 		 * data from its user and only deliver data in queue
325 		 * at the time of sending or receiving the SHUTDOWN
326 		 * chunk.
327 		 */
328 		switch (q->asoc->state) {
329 		case SCTP_STATE_CLOSED:
330 		case SCTP_STATE_SHUTDOWN_PENDING:
331 		case SCTP_STATE_SHUTDOWN_SENT:
332 		case SCTP_STATE_SHUTDOWN_RECEIVED:
333 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
334 			/* Cannot send after transport endpoint shutdown */
335 			error = -ESHUTDOWN;
336 			break;
337 
338 		default:
339 			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
340 			  q, chunk, chunk && chunk->chunk_hdr ?
341 			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
342 			  : "Illegal Chunk");
343 
344 			sctp_outq_tail_data(q, chunk);
345 			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
346 				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
347 			else
348 				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
349 			q->empty = 0;
350 			break;
351 		}
352 	} else {
353 		list_add_tail(&chunk->list, &q->control_chunk_list);
354 		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
355 	}
356 
357 	if (error < 0)
358 		return error;
359 
360 	if (!q->cork)
361 		error = sctp_outq_flush(q, 0);
362 
363 	return error;
364 }
365 
366 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
367  * and the abandoned list are in ascending order.
368  */
369 static void sctp_insert_list(struct list_head *head, struct list_head *new)
370 {
371 	struct list_head *pos;
372 	struct sctp_chunk *nchunk, *lchunk;
373 	__u32 ntsn, ltsn;
374 	int done = 0;
375 
376 	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
377 	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
378 
379 	list_for_each(pos, head) {
380 		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
381 		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
382 		if (TSN_lt(ntsn, ltsn)) {
383 			list_add(new, pos->prev);
384 			done = 1;
385 			break;
386 		}
387 	}
388 	if (!done)
389 		list_add_tail(new, head);
390 }
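
/* For illustration (a sketch with made-up TSNs): if the list holds TSNs
 * 100, 102 and 105 and a chunk with TSN 103 is inserted, TSN_lt(103, 105)
 * stops the scan at 105 and the new chunk lands between 102 and 105.
 * TSN_lt() compares in serial-number space, so this ordering is preserved
 * even across 32-bit TSN wraparound.
 */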
391 
392 /* Mark all the eligible packets on a transport for retransmission.  */
393 void sctp_retransmit_mark(struct sctp_outq *q,
394 			  struct sctp_transport *transport,
395 			  __u8 reason)
396 {
397 	struct list_head *lchunk, *ltemp;
398 	struct sctp_chunk *chunk;
399 
400 	/* Walk through the specified transmitted queue.  */
401 	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
402 		chunk = list_entry(lchunk, struct sctp_chunk,
403 				   transmitted_list);
404 
405 		/* If the chunk is abandoned, move it to abandoned list. */
406 		if (sctp_chunk_abandoned(chunk)) {
407 			list_del_init(lchunk);
408 			sctp_insert_list(&q->abandoned, lchunk);
409 
410 			/* If this chunk has not been previously acked,
411 			 * stop considering it 'outstanding'.  Our peer
412 			 * will most likely never see it since it will
413 			 * not be retransmitted.
414 			 */
415 			if (!chunk->tsn_gap_acked) {
416 				if (chunk->transport)
417 					chunk->transport->flight_size -=
418 							sctp_data_size(chunk);
419 				q->outstanding_bytes -= sctp_data_size(chunk);
420 				q->asoc->peer.rwnd += sctp_data_size(chunk);
421 			}
422 			continue;
423 		}
424 
425 		/* If we are doing retransmission due to a timeout or PMTU
426 		 * discovery, only the chunks that are not yet acked should
427 		 * be added to the retransmit queue.
428 		 */
429 		if ((reason == SCTP_RTXR_FAST_RTX  &&
430 			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
431 		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
432 			/* RFC 2960 6.2.1 Processing a Received SACK
433 			 *
434 			 * C) Any time a DATA chunk is marked for
435 			 * retransmission (via either T3-rtx timer expiration
436 			 * (Section 6.3.3) or via fast retransmit
437 			 * (Section 7.2.4)), add the data size of those
438 			 * chunks to the rwnd.
439 			 */
440 			q->asoc->peer.rwnd += sctp_data_size(chunk);
441 			q->outstanding_bytes -= sctp_data_size(chunk);
442 			if (chunk->transport)
443 				transport->flight_size -= sctp_data_size(chunk);
444 
445 			/* sctpimpguide-05 Section 2.8.2
446 			 * M5) If a T3-rtx timer expires, the
447 			 * 'TSN.Missing.Report' of all affected TSNs is set
448 			 * to 0.
449 			 */
450 			chunk->tsn_missing_report = 0;
451 
452 			/* If a chunk that is being used for RTT measurement
453 			 * has to be retransmitted, we cannot use this chunk
454 			 * anymore for RTT measurements. Reset rto_pending so
455 			 * that a new RTT measurement is started when a new
456 			 * data chunk is sent.
457 			 */
458 			if (chunk->rtt_in_progress) {
459 				chunk->rtt_in_progress = 0;
460 				transport->rto_pending = 0;
461 			}
462 
463 			/* Move the chunk to the retransmit queue. The chunks
464 			 * on the retransmit queue are always kept in order.
465 			 */
466 			list_del_init(lchunk);
467 			sctp_insert_list(&q->retransmit, lchunk);
468 		}
469 	}
470 
471 	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
472 			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
473 			  "pba: %d\n", __func__,
474 			  transport, reason,
475 			  transport->cwnd, transport->ssthresh,
476 			  transport->flight_size,
477 			  transport->partial_bytes_acked);
478 
479 }
480 
481 /* Mark all the eligible packets on a transport for retransmission and force
482  * one packet out.
483  */
484 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
485 		     sctp_retransmit_reason_t reason)
486 {
487 	struct net *net = sock_net(q->asoc->base.sk);
488 	int error = 0;
489 
490 	switch (reason) {
491 	case SCTP_RTXR_T3_RTX:
492 		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
493 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
494 		/* Update the retran path if the T3-rtx timer has expired for
495 		 * the current retran path.
496 		 */
497 		if (transport == transport->asoc->peer.retran_path)
498 			sctp_assoc_update_retran_path(transport->asoc);
499 		transport->asoc->rtx_data_chunks +=
500 			transport->asoc->unack_data;
501 		break;
502 	case SCTP_RTXR_FAST_RTX:
503 		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
504 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
505 		q->fast_rtx = 1;
506 		break;
507 	case SCTP_RTXR_PMTUD:
508 		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
509 		break;
510 	case SCTP_RTXR_T1_RTX:
511 		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
512 		transport->asoc->init_retries++;
513 		break;
514 	default:
515 		BUG();
516 	}
517 
518 	sctp_retransmit_mark(q, transport, reason);
519 
520 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
521 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
522 	 * following the procedures outlined in C1 - C5.
523 	 */
524 	if (reason == SCTP_RTXR_T3_RTX)
525 		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
526 
527 	/* Flush the queues only on timeout, since fast_rtx is only
528 	 * triggered during sack processing and the queue
529 	 * will be flushed at the end.
530 	 */
531 	if (reason != SCTP_RTXR_FAST_RTX)
532 		error = sctp_outq_flush(q, /* rtx_timeout */ 1);
533 
534 	if (error)
535 		q->asoc->base.sk->sk_err = -error;
536 }
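
/* A summary of the reason codes handled above (derived from the switch
 * statement; see sctp_retransmit_mark() for what actually gets queued):
 *
 *   SCTP_RTXR_T3_RTX:   lower cwnd for T3-rtx, possibly update the
 *                       retransmission path, then flush the queues.
 *   SCTP_RTXR_FAST_RTX: lower cwnd for fast retransmit; no flush here,
 *                       since SACK processing flushes at the end.
 *   SCTP_RTXR_PMTUD:    count a PMTUD retransmit, then flush.
 *   SCTP_RTXR_T1_RTX:   count an INIT retry (init_retries++), then flush.
 */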
537 
538 /*
539  * Transmit DATA chunks on the retransmit queue.  Upon return from
540  * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
541  * need to be transmitted by the caller.
542  * We assume that pkt->transport has already been set.
543  *
544  * The return value is a normal kernel error return value.
545  */
546 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
547 			       int rtx_timeout, int *start_timer)
548 {
549 	struct list_head *lqueue;
550 	struct sctp_transport *transport = pkt->transport;
551 	sctp_xmit_t status;
552 	struct sctp_chunk *chunk, *chunk1;
553 	int fast_rtx;
554 	int error = 0;
555 	int timer = 0;
556 	int done = 0;
557 
558 	lqueue = &q->retransmit;
559 	fast_rtx = q->fast_rtx;
560 
561 	/* This loop handles time-out retransmissions, fast retransmissions,
562 	 * and retransmissions due to the opening of the window.
563 	 *
564 	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
565 	 *
566 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
567 	 * outstanding DATA chunks for the address for which the
568 	 * T3-rtx has expired will fit into a single packet, subject
569 	 * to the MTU constraint for the path corresponding to the
570 	 * destination transport address to which the retransmission
571 	 * is being sent (this may be different from the address for
572 	 * which the timer expires [see Section 6.4]). Call this value
573 	 * K. Bundle and retransmit those K DATA chunks in a single
574 	 * packet to the destination endpoint.
575 	 *
576 	 * [Just to be painfully clear, if we are retransmitting
577 	 * because a timeout just happened, we should send only ONE
578 	 * packet of retransmitted data.]
579 	 *
580 	 * For fast retransmissions we also send only ONE packet.  However,
581 	 * if we are just flushing the queue due to open window, we'll
582 	 * try to send as much as possible.
583 	 */
584 	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
585 		/* If the chunk is abandoned, move it to abandoned list. */
586 		if (sctp_chunk_abandoned(chunk)) {
587 			list_del_init(&chunk->transmitted_list);
588 			sctp_insert_list(&q->abandoned,
589 					 &chunk->transmitted_list);
590 			continue;
591 		}
592 
593 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
594 		 * simple approach is just to move such TSNs out of the
595 		 * way and into a 'transmitted' queue and skip to the
596 		 * next chunk.
597 		 */
598 		if (chunk->tsn_gap_acked) {
599 			list_move_tail(&chunk->transmitted_list,
600 				       &transport->transmitted);
601 			continue;
602 		}
603 
604 		/* If we are doing fast retransmit, ignore chunks that are
605 		 * not marked for fast retransmit.
606 		 */
607 		if (fast_rtx && !chunk->fast_retransmit)
608 			continue;
609 
610 redo:
611 		/* Attempt to append this chunk to the packet. */
612 		status = sctp_packet_append_chunk(pkt, chunk);
613 
614 		switch (status) {
615 		case SCTP_XMIT_PMTU_FULL:
616 			if (!pkt->has_data && !pkt->has_cookie_echo) {
617 				/* If this packet did not contain DATA then
618 				 * retransmission did not happen, so do it
619 				 * again.  We'll ignore the error here since
620 				 * control chunks are already freed so there
621 				 * is nothing we can do.
622 				 */
623 				sctp_packet_transmit(pkt);
624 				goto redo;
625 			}
626 
627 			/* Send this packet.  */
628 			error = sctp_packet_transmit(pkt);
629 
630 			/* If we are retransmitting, we should only
631 			 * send a single packet.
632 			 * Otherwise, try appending this chunk again.
633 			 */
634 			if (rtx_timeout || fast_rtx)
635 				done = 1;
636 			else
637 				goto redo;
638 
639 			/* Bundle next chunk in the next round.  */
640 			break;
641 
642 		case SCTP_XMIT_RWND_FULL:
643 			/* Send this packet. */
644 			error = sctp_packet_transmit(pkt);
645 
646 			/* Stop sending DATA as there is no more room
647 			 * at the receiver.
648 			 */
649 			done = 1;
650 			break;
651 
652 		case SCTP_XMIT_NAGLE_DELAY:
653 			/* Send this packet. */
654 			error = sctp_packet_transmit(pkt);
655 
656 			/* Stop sending DATA because of Nagle delay. */
657 			done = 1;
658 			break;
659 
660 		default:
661 			/* The append was successful, so add this chunk to
662 			 * the transmitted list.
663 			 */
664 			list_move_tail(&chunk->transmitted_list,
665 				       &transport->transmitted);
666 
667 			/* Mark the chunk as ineligible for fast retransmit
668 			 * after it is retransmitted.
669 			 */
670 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
671 				chunk->fast_retransmit = SCTP_DONT_FRTX;
672 
673 			q->empty = 0;
674 			q->asoc->stats.rtxchunks++;
675 			break;
676 		}
677 
678 		/* Set the timer if there were no errors */
679 		if (!error && !timer)
680 			timer = 1;
681 
682 		if (done)
683 			break;
684 	}
685 
686 	/* If we are here due to a retransmit timeout or a fast
687 	 * retransmit and if there are any chunks left in the retransmit
688 	 * queue that could not fit in the PMTU sized packet, they need
689 	 * to be marked as ineligible for a subsequent fast retransmit.
690 	 */
691 	if (rtx_timeout || fast_rtx) {
692 		list_for_each_entry(chunk1, lqueue, transmitted_list) {
693 			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
694 				chunk1->fast_retransmit = SCTP_DONT_FRTX;
695 		}
696 	}
697 
698 	*start_timer = timer;
699 
700 	/* Clear fast retransmit hint */
701 	if (fast_rtx)
702 		q->fast_rtx = 0;
703 
704 	return error;
705 }
706 
707 /* Uncork the outqueue: flush any chunks that were queued while corked. */
708 int sctp_outq_uncork(struct sctp_outq *q)
709 {
710 	int error = 0;
711 	if (q->cork)
712 		q->cork = 0;
713 	error = sctp_outq_flush(q, 0);
714 	return error;
715 }
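
/* A minimal usage sketch of corking (hypothetical caller; assumes the
 * socket lock is held, as noted above sctp_outq_flush()):
 *
 *	q->cork = 1;			(queue, but do not flush yet)
 *	sctp_outq_tail(q, chunk1);	(queued only: tail flushes when !cork)
 *	sctp_outq_tail(q, chunk2);
 *	error = sctp_outq_uncork(q);	(clears cork, then flushes once)
 */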
716 
717 
718 /*
719  * Try to flush an outqueue.
720  *
721  * Description: Send everything in q which we legally can, subject to
722  * congestion limitations.
723  * Note: This function can be called from multiple contexts, so appropriate
724  * locking must be applied.  Today we use the sock lock to protect
725  * this function.
726  */
727 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
728 {
729 	struct sctp_packet *packet;
730 	struct sctp_packet singleton;
731 	struct sctp_association *asoc = q->asoc;
732 	__u16 sport = asoc->base.bind_addr.port;
733 	__u16 dport = asoc->peer.port;
734 	__u32 vtag = asoc->peer.i.init_tag;
735 	struct sctp_transport *transport = NULL;
736 	struct sctp_transport *new_transport;
737 	struct sctp_chunk *chunk, *tmp;
738 	sctp_xmit_t status;
739 	int error = 0;
740 	int start_timer = 0;
741 	int one_packet = 0;
742 
743 	/* These transports have chunks to send. */
744 	struct list_head transport_list;
745 	struct list_head *ltransport;
746 
747 	INIT_LIST_HEAD(&transport_list);
748 	packet = NULL;
749 
750 	/*
751 	 * 6.10 Bundling
752 	 *   ...
753 	 *   When bundling control chunks with DATA chunks, an
754 	 *   endpoint MUST place control chunks first in the outbound
755 	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
756 	 *   within a SCTP packet in increasing order of TSN.
757 	 *   ...
758 	 */
759 
760 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
761 		/* RFC 5061, 5.3
762 		 * F1) This means that until such time as the ASCONF
763 		 * containing the add is acknowledged, the sender MUST
764 		 * NOT use the new IP address as a source for ANY SCTP
765 		 * packet except on carrying an ASCONF Chunk.
766 		 */
767 		if (asoc->src_out_of_asoc_ok &&
768 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
769 			continue;
770 
771 		list_del_init(&chunk->list);
772 
773 		/* Pick the right transport to use. */
774 		new_transport = chunk->transport;
775 
776 		if (!new_transport) {
777 			/*
778 			 * If we have a prior transport pointer, see if
779 			 * the destination address of the chunk
780 			 * matches the destination address of the
781 			 * current transport.  If not a match, then
782 			 * try to look up the transport with a given
783 			 * destination address.  We do this because
784 			 * after processing ASCONFs, we may have new
785 			 * transports created.
786 			 */
787 			if (transport &&
788 			    sctp_cmp_addr_exact(&chunk->dest,
789 						&transport->ipaddr))
790 				new_transport = transport;
791 			else
792 				new_transport = sctp_assoc_lookup_paddr(asoc,
793 								&chunk->dest);
794 
795 			/* if we still don't have a new transport, then
796 			 * use the current active path.
797 			 */
798 			if (!new_transport)
799 				new_transport = asoc->peer.active_path;
800 		} else if ((new_transport->state == SCTP_INACTIVE) ||
801 			   (new_transport->state == SCTP_UNCONFIRMED) ||
802 			   (new_transport->state == SCTP_PF)) {
803 			/* If the chunk is Heartbeat or Heartbeat Ack,
804 			 * send it to chunk->transport, even if it's
805 			 * inactive.
806 			 *
807 			 * 3.3.6 Heartbeat Acknowledgement:
808 			 * ...
809 			 * A HEARTBEAT ACK is always sent to the source IP
810 			 * address of the IP datagram containing the
811 			 * HEARTBEAT chunk to which this ack is responding.
812 			 * ...
813 			 *
814 			 * ASCONF_ACKs also must be sent to the source.
815 			 */
816 			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
817 			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
818 			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
819 				new_transport = asoc->peer.active_path;
820 		}
821 
822 		/* Are we switching transports?
823 		 * Take care of transport locks.
824 		 */
825 		if (new_transport != transport) {
826 			transport = new_transport;
827 			if (list_empty(&transport->send_ready)) {
828 				list_add_tail(&transport->send_ready,
829 					      &transport_list);
830 			}
831 			packet = &transport->packet;
832 			sctp_packet_config(packet, vtag,
833 					   asoc->peer.ecn_capable);
834 		}
835 
836 		switch (chunk->chunk_hdr->type) {
837 		/*
838 		 * 6.10 Bundling
839 		 *   ...
840 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
841 		 *   COMPLETE with any other chunks.  [Send them immediately.]
842 		 */
843 		case SCTP_CID_INIT:
844 		case SCTP_CID_INIT_ACK:
845 		case SCTP_CID_SHUTDOWN_COMPLETE:
846 			sctp_packet_init(&singleton, transport, sport, dport);
847 			sctp_packet_config(&singleton, vtag, 0);
848 			sctp_packet_append_chunk(&singleton, chunk);
849 			error = sctp_packet_transmit(&singleton);
850 			if (error < 0)
851 				return error;
852 			break;
853 
854 		case SCTP_CID_ABORT:
855 			if (sctp_test_T_bit(chunk)) {
856 				packet->vtag = asoc->c.my_vtag;
857 			}
858 		/* The following chunks are "response" chunks, i.e.
859 		 * they are generated in response to something we
860 		 * received.  If we are sending these, then we can
861 		 * send only 1 packet containing these chunks.
862 		 */
863 		case SCTP_CID_HEARTBEAT_ACK:
864 		case SCTP_CID_SHUTDOWN_ACK:
865 		case SCTP_CID_COOKIE_ACK:
866 		case SCTP_CID_COOKIE_ECHO:
867 		case SCTP_CID_ERROR:
868 		case SCTP_CID_ECN_CWR:
869 		case SCTP_CID_ASCONF_ACK:
870 			one_packet = 1;
871 			/* Fall through */
872 
873 		case SCTP_CID_SACK:
874 		case SCTP_CID_HEARTBEAT:
875 		case SCTP_CID_SHUTDOWN:
876 		case SCTP_CID_ECN_ECNE:
877 		case SCTP_CID_ASCONF:
878 		case SCTP_CID_FWD_TSN:
879 			status = sctp_packet_transmit_chunk(packet, chunk,
880 							    one_packet);
881 			if (status != SCTP_XMIT_OK) {
882 				/* put the chunk back */
883 				list_add(&chunk->list, &q->control_chunk_list);
884 			} else {
885 				asoc->stats.octrlchunks++;
886 				/* PR-SCTP C5) If a FORWARD TSN is sent, the
887 				 * sender MUST assure that at least one T3-rtx
888 				 * timer is running.
889 				 */
890 				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
891 					sctp_transport_reset_timers(transport);
892 			}
893 			break;
894 
895 		default:
896 			/* We built a chunk with an illegal type! */
897 			BUG();
898 		}
899 	}
900 
901 	if (q->asoc->src_out_of_asoc_ok)
902 		goto sctp_flush_out;
903 
904 	/* Is it OK to send data chunks?  */
905 	switch (asoc->state) {
906 	case SCTP_STATE_COOKIE_ECHOED:
907 		/* Only allow bundling when this packet has a COOKIE-ECHO
908 		 * chunk.
909 		 */
910 		if (!packet || !packet->has_cookie_echo)
911 			break;
912 
913 		/* fallthru */
914 	case SCTP_STATE_ESTABLISHED:
915 	case SCTP_STATE_SHUTDOWN_PENDING:
916 	case SCTP_STATE_SHUTDOWN_RECEIVED:
917 		/*
918 		 * RFC 2960 6.1  Transmission of DATA Chunks
919 		 *
920 		 * C) When the time comes for the sender to transmit,
921 		 * before sending new DATA chunks, the sender MUST
922 		 * first transmit any outstanding DATA chunks which
923 		 * are marked for retransmission (limited by the
924 		 * current cwnd).
925 		 */
926 		if (!list_empty(&q->retransmit)) {
927 			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
928 				goto sctp_flush_out;
929 			if (transport == asoc->peer.retran_path)
930 				goto retran;
931 
932 			/* Switch transports & prepare the packet.  */
933 
934 			transport = asoc->peer.retran_path;
935 
936 			if (list_empty(&transport->send_ready)) {
937 				list_add_tail(&transport->send_ready,
938 					      &transport_list);
939 			}
940 
941 			packet = &transport->packet;
942 			sctp_packet_config(packet, vtag,
943 					   asoc->peer.ecn_capable);
944 		retran:
945 			error = sctp_outq_flush_rtx(q, packet,
946 						    rtx_timeout, &start_timer);
947 
948 			if (start_timer)
949 				sctp_transport_reset_timers(transport);
950 
951 			/* This can happen on COOKIE-ECHO resend.  Only
952 			 * one chunk can get bundled with a COOKIE-ECHO.
953 			 */
954 			if (packet->has_cookie_echo)
955 				goto sctp_flush_out;
956 
957 			/* Don't send new data if there is still data
958 			 * waiting to retransmit.
959 			 */
960 			if (!list_empty(&q->retransmit))
961 				goto sctp_flush_out;
962 		}
963 
964 		/* Apply Max.Burst limitation to the current transport in
965 		 * case it will be used for new data.  We are going to
966 		 * reset it before we return, but we want to apply the limit
967 		 * to the currently queued data.
968 		 */
969 		if (transport)
970 			sctp_transport_burst_limited(transport);
971 
972 		/* Finally, transmit new packets.  */
973 		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
974 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
975 			 * stream identifier.
976 			 */
977 			if (chunk->sinfo.sinfo_stream >=
978 			    asoc->c.sinit_num_ostreams) {
979 
980 				/* Mark as failed send. */
981 				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
982 				sctp_chunk_free(chunk);
983 				continue;
984 			}
985 
986 			/* Has this chunk expired? */
987 			if (sctp_chunk_abandoned(chunk)) {
988 				sctp_chunk_fail(chunk, 0);
989 				sctp_chunk_free(chunk);
990 				continue;
991 			}
992 
993 			/* If there is a specified transport, use it.
994 			 * Otherwise, we want to use the active path.
995 			 */
996 			new_transport = chunk->transport;
997 			if (!new_transport ||
998 			    ((new_transport->state == SCTP_INACTIVE) ||
999 			     (new_transport->state == SCTP_UNCONFIRMED) ||
1000 			     (new_transport->state == SCTP_PF)))
1001 				new_transport = asoc->peer.active_path;
1002 			if (new_transport->state == SCTP_UNCONFIRMED)
1003 				continue;
1004 
1005 			/* Change packets if necessary.  */
1006 			if (new_transport != transport) {
1007 				transport = new_transport;
1008 
1009 				/* Schedule to have this transport's
1010 				 * packet flushed.
1011 				 */
1012 				if (list_empty(&transport->send_ready)) {
1013 					list_add_tail(&transport->send_ready,
1014 						      &transport_list);
1015 				}
1016 
1017 				packet = &transport->packet;
1018 				sctp_packet_config(packet, vtag,
1019 						   asoc->peer.ecn_capable);
1020 				/* We've switched transports, so apply the
1021 				 * Burst limit to the new transport.
1022 				 */
1023 				sctp_transport_burst_limited(transport);
1024 			}
1025 
1026 			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
1027 					  q, chunk,
1028 					  chunk && chunk->chunk_hdr ?
1029 					  sctp_cname(SCTP_ST_CHUNK(
1030 						  chunk->chunk_hdr->type))
1031 					  : "Illegal Chunk");
1032 
1033 			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
1034 					"%p skb->users %d.\n",
1035 					ntohl(chunk->subh.data_hdr->tsn),
1036 					chunk->skb ? chunk->skb->head : NULL,
1037 					chunk->skb ?
1038 					atomic_read(&chunk->skb->users) : -1);
1039 
1040 			/* Add the chunk to the packet.  */
1041 			status = sctp_packet_transmit_chunk(packet, chunk, 0);
1042 
1043 			switch (status) {
1044 			case SCTP_XMIT_PMTU_FULL:
1045 			case SCTP_XMIT_RWND_FULL:
1046 			case SCTP_XMIT_NAGLE_DELAY:
1047 				/* We could not append this chunk, so put
1048 				 * the chunk back on the output queue.
1049 				 */
1050 				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
1051 					"not transmit TSN: 0x%x, status: %d\n",
1052 					ntohl(chunk->subh.data_hdr->tsn),
1053 					status);
1054 				sctp_outq_head_data(q, chunk);
1055 				goto sctp_flush_out;
1056 				break;
1057 
1058 			case SCTP_XMIT_OK:
1059 				/* If the sender is in the SHUTDOWN-PENDING
1060 				 * state, it MAY set the I-bit in the DATA
1061 				 * chunk header.
1062 				 */
1063 				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1064 					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1065 				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1066 					asoc->stats.ouodchunks++;
1067 				else
1068 					asoc->stats.oodchunks++;
1069 
1070 				break;
1071 
1072 			default:
1073 				BUG();
1074 			}
1075 
1076 			/* BUG: We assume that the sctp_packet_transmit()
1077 			 * call below will succeed all the time and add the
1078 			 * chunk to the transmitted list and restart the
1079 			 * timers.
1080 			 * It is possible that the call can fail under OOM
1081 			 * conditions.
1082 			 *
1083 			 * Is this really a problem?  Won't this behave
1084 			 * like a lost TSN?
1085 			 */
1086 			list_add_tail(&chunk->transmitted_list,
1087 				      &transport->transmitted);
1088 
1089 			sctp_transport_reset_timers(transport);
1090 
1091 			q->empty = 0;
1092 
1093 			/* Only let one DATA chunk get bundled with a
1094 			 * COOKIE-ECHO chunk.
1095 			 */
1096 			if (packet->has_cookie_echo)
1097 				goto sctp_flush_out;
1098 		}
1099 		break;
1100 
1101 	default:
1102 		/* Do nothing.  */
1103 		break;
1104 	}
1105 
1106 sctp_flush_out:
1107 
1108 	/* Before returning, examine all the transports touched in
1109 	 * this call.  Right now, we bluntly force clear all the
1110 	 * transports.  Things might change after we implement Nagle.
1111 	 * But such an examination is still required.
1112 	 *
1113 	 * --xguo
1114 	 */
1115 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
1116 		struct sctp_transport *t = list_entry(ltransport,
1117 						      struct sctp_transport,
1118 						      send_ready);
1119 		packet = &t->packet;
1120 		if (!sctp_packet_empty(packet))
1121 			error = sctp_packet_transmit(packet);
1122 
1123 		/* Clear the burst limited state, if any */
1124 		sctp_transport_burst_reset(t);
1125 	}
1126 
1127 	return error;
1128 }
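
/* In short, the flush above works through three stages in order: control
 * chunks first (per the bundling rule in 6.10), then anything waiting on
 * the retransmit queue (per 6.1 C), and only then new DATA, finishing
 * with one sctp_packet_transmit() per transport touched in this call.
 */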
1129 
1130 /* Update unack_data based on the incoming SACK chunk */
1131 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1132 					struct sctp_sackhdr *sack)
1133 {
1134 	sctp_sack_variable_t *frags;
1135 	__u16 unack_data;
1136 	int i;
1137 
1138 	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1139 
1140 	frags = sack->variable;
1141 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1142 		unack_data -= ((ntohs(frags[i].gab.end) -
1143 				ntohs(frags[i].gab.start) + 1));
1144 	}
1145 
1146 	assoc->unack_data = unack_data;
1147 }
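
/* A worked example of the computation above (illustrative numbers):
 * with next_tsn == 1000 and ctsn_ack_point == 989, there are
 * 1000 - 989 - 1 == 10 unacked chunks.  A single gap ack block with
 * start == 3 and end == 5 covers TSNs 992..994, i.e. 5 - 3 + 1 == 3
 * chunks, leaving unack_data == 7.
 */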
1148 
1149 /* This is where we REALLY process a SACK.
1150  *
1151  * Process the SACK against the outqueue.  Mostly, this just frees
1152  * things off the transmitted queue.
1153  */
1154 int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1155 {
1156 	struct sctp_association *asoc = q->asoc;
1157 	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
1158 	struct sctp_transport *transport;
1159 	struct sctp_chunk *tchunk = NULL;
1160 	struct list_head *lchunk, *transport_list, *temp;
1161 	sctp_sack_variable_t *frags = sack->variable;
1162 	__u32 sack_ctsn, ctsn, tsn;
1163 	__u32 highest_tsn, highest_new_tsn;
1164 	__u32 sack_a_rwnd;
1165 	unsigned int outstanding;
1166 	struct sctp_transport *primary = asoc->peer.primary_path;
1167 	int count_of_newacks = 0;
1168 	int gap_ack_blocks;
1169 	u8 accum_moved = 0;
1170 
1171 	/* Grab the association's destination address list. */
1172 	transport_list = &asoc->peer.transport_addr_list;
1173 
1174 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1175 	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1176 	asoc->stats.gapcnt += gap_ack_blocks;
1177 	/*
1178 	 * SFR-CACC algorithm:
1179 	 * On receipt of a SACK the sender SHOULD execute the
1180 	 * following statements.
1181 	 *
1182 	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1183 	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1184 	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1185 	 * all destinations.
1186 	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1187 	 * is set the receiver of the SACK MUST take the following actions:
1188 	 *
1189 	 * A) Initialize the cacc_saw_newack to 0 for all destination
1190 	 * addresses.
1191 	 *
1192 	 * Only bother if changeover_active is set. Otherwise, this is
1193 	 * totally suboptimal to do on every SACK.
1194 	 */
1195 	if (primary->cacc.changeover_active) {
1196 		u8 clear_cycling = 0;
1197 
1198 		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1199 			primary->cacc.changeover_active = 0;
1200 			clear_cycling = 1;
1201 		}
1202 
1203 		if (clear_cycling || gap_ack_blocks) {
1204 			list_for_each_entry(transport, transport_list,
1205 					transports) {
1206 				if (clear_cycling)
1207 					transport->cacc.cycling_changeover = 0;
1208 				if (gap_ack_blocks)
1209 					transport->cacc.cacc_saw_newack = 0;
1210 			}
1211 		}
1212 	}
1213 
1214 	/* Get the highest TSN in the sack. */
1215 	highest_tsn = sack_ctsn;
1216 	if (gap_ack_blocks)
1217 		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1218 
1219 	if (TSN_lt(asoc->highest_sacked, highest_tsn))
1220 		asoc->highest_sacked = highest_tsn;
1221 
1222 	highest_new_tsn = sack_ctsn;
1223 
1224 	/* Run through the retransmit queue.  Credit bytes received
1225 	 * and free those chunks that we can.
1226 	 */
1227 	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
1228 
1229 	/* Run through the transmitted queue.
1230 	 * Credit bytes received and free those chunks which we can.
1231 	 *
1232 	 * This is a MASSIVE candidate for optimization.
1233 	 */
1234 	list_for_each_entry(transport, transport_list, transports) {
1235 		sctp_check_transmitted(q, &transport->transmitted,
1236 				       transport, &chunk->source, sack,
1237 				       &highest_new_tsn);
1238 		/*
1239 		 * SFR-CACC algorithm:
1240 		 * C) Let count_of_newacks be the number of
1241 		 * destinations for which cacc_saw_newack is set.
1242 		 */
1243 		if (transport->cacc.cacc_saw_newack)
1244 			count_of_newacks++;
1245 	}
1246 
1247 	/* Move the Cumulative TSN Ack Point if appropriate.  */
1248 	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1249 		asoc->ctsn_ack_point = sack_ctsn;
1250 		accum_moved = 1;
1251 	}
1252 
1253 	if (gap_ack_blocks) {
1254 
1255 		if (asoc->fast_recovery && accum_moved)
1256 			highest_new_tsn = highest_tsn;
1257 
1258 		list_for_each_entry(transport, transport_list, transports)
1259 			sctp_mark_missing(q, &transport->transmitted, transport,
1260 					  highest_new_tsn, count_of_newacks);
1261 	}
1262 
1263 	/* Update unack_data field in the assoc. */
1264 	sctp_sack_update_unack_data(asoc, sack);
1265 
1266 	ctsn = asoc->ctsn_ack_point;
1267 
1268 	/* Throw away stuff rotting on the sack queue.  */
1269 	list_for_each_safe(lchunk, temp, &q->sacked) {
1270 		tchunk = list_entry(lchunk, struct sctp_chunk,
1271 				    transmitted_list);
1272 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1273 		if (TSN_lte(tsn, ctsn)) {
1274 			list_del_init(&tchunk->transmitted_list);
1275 			sctp_chunk_free(tchunk);
1276 		}
1277 	}
1278 
1279 	/* ii) Set rwnd equal to the newly received a_rwnd minus the
1280 	 *     number of bytes still outstanding after processing the
1281 	 *     Cumulative TSN Ack and the Gap Ack Blocks.
1282 	 */
1283 
1284 	sack_a_rwnd = ntohl(sack->a_rwnd);
1285 	outstanding = q->outstanding_bytes;
1286 
1287 	if (outstanding < sack_a_rwnd)
1288 		sack_a_rwnd -= outstanding;
1289 	else
1290 		sack_a_rwnd = 0;
1291 
1292 	asoc->peer.rwnd = sack_a_rwnd;
1293 
1294 	sctp_generate_fwdtsn(q, sack_ctsn);
1295 
1296 	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
1297 			  __func__, sack_ctsn);
1298 	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
1299 			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
1300 			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);
1301 
1302 	/* See if all chunks are acked.
1303 	 * Make sure the empty queue handler will get run later.
1304 	 */
1305 	q->empty = (list_empty(&q->out_chunk_list) &&
1306 		    list_empty(&q->retransmit));
1307 	if (!q->empty)
1308 		goto finish;
1309 
1310 	list_for_each_entry(transport, transport_list, transports) {
1311 		q->empty = q->empty && list_empty(&transport->transmitted);
1312 		if (!q->empty)
1313 			goto finish;
1314 	}
1315 
1316 	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
1317 finish:
1318 	return q->empty;
1319 }
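
/* A worked example of the rwnd update in sctp_outq_sack() above
 * (illustrative numbers): if the SACK advertises a_rwnd == 65536 and
 * 12000 bytes remain outstanding after processing the Cumulative TSN Ack
 * and the Gap Ack Blocks, the peer's rwnd becomes 53536.  If more is
 * outstanding than advertised, rwnd is clamped to 0.
 */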
1320 
1321 /* Is the outqueue empty?  */
1322 int sctp_outq_is_empty(const struct sctp_outq *q)
1323 {
1324 	return q->empty;
1325 }
1326 
1327 /********************************************************************
1328  * 2nd Level Abstractions
1329  ********************************************************************/
1330 
1331 /* Go through a transport's transmitted list or the association's retransmit
1332  * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1333  * The retransmit list will not have an associated transport.
1334  *
1335  * I added coherent debug information output.	--xguo
1336  *
1337  * Instead of printing 'sacked' or 'kept' for each TSN on the
1338  * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1339  * KEPT TSN6-TSN7, etc.
1340  */
1341 static void sctp_check_transmitted(struct sctp_outq *q,
1342 				   struct list_head *transmitted_queue,
1343 				   struct sctp_transport *transport,
1344 				   union sctp_addr *saddr,
1345 				   struct sctp_sackhdr *sack,
1346 				   __u32 *highest_new_tsn_in_sack)
1347 {
1348 	struct list_head *lchunk;
1349 	struct sctp_chunk *tchunk;
1350 	struct list_head tlist;
1351 	__u32 tsn;
1352 	__u32 sack_ctsn;
1353 	__u32 rtt;
1354 	__u8 restart_timer = 0;
1355 	int bytes_acked = 0;
1356 	int migrate_bytes = 0;
1357 
1358 	/* These state variables are for coherent debug output. --xguo */
1359 
1360 #if SCTP_DEBUG
1361 	__u32 dbg_ack_tsn = 0;	/* An ACKed TSN range starts here... */
1362 	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
1363 	__u32 dbg_kept_tsn = 0;	/* An un-ACKed range starts here...  */
1364 	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */
1365 
1366 	/* 0 : The last TSN was ACKed.
1367 	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
1368 	 * -1: We need to initialize.
1369 	 */
1370 	int dbg_prt_state = -1;
1371 #endif /* SCTP_DEBUG */
1372 
1373 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1374 
1375 	INIT_LIST_HEAD(&tlist);
1376 
1377 	/* The while loop will skip empty transmitted queues. */
1378 	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1379 		tchunk = list_entry(lchunk, struct sctp_chunk,
1380 				    transmitted_list);
1381 
1382 		if (sctp_chunk_abandoned(tchunk)) {
1383 			/* Move the chunk to abandoned list. */
1384 			sctp_insert_list(&q->abandoned, lchunk);
1385 
1386 			/* If this chunk has not been acked, stop
1387 			 * considering it as 'outstanding'.
1388 			 */
1389 			if (!tchunk->tsn_gap_acked) {
1390 				if (tchunk->transport)
1391 					tchunk->transport->flight_size -=
1392 							sctp_data_size(tchunk);
1393 				q->outstanding_bytes -= sctp_data_size(tchunk);
1394 			}
1395 			continue;
1396 		}
1397 
1398 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1399 		if (sctp_acked(sack, tsn)) {
1400 			/* If this queue is the retransmit queue, the
1401 			 * retransmit timer has already reclaimed
1402 			 * the outstanding bytes for this chunk, so only
1403 			 * count bytes associated with a transport.
1404 			 */
1405 			if (transport) {
1406 				/* If this chunk is being used for RTT
1407 				 * measurement, calculate the RTT and update
1408 				 * the RTO using this value.
1409 				 *
1410 				 * 6.3.1 C5) Karn's algorithm: RTT measurements
1411 				 * MUST NOT be made using packets that were
1412 				 * retransmitted (and thus for which it is
1413 				 * ambiguous whether the reply was for the
1414 				 * first instance of the packet or a later
1415 				 * instance).
1416 				 */
1417 				if (!tchunk->tsn_gap_acked &&
1418 				    tchunk->rtt_in_progress) {
1419 					tchunk->rtt_in_progress = 0;
1420 					rtt = jiffies - tchunk->sent_at;
1421 					sctp_transport_update_rto(transport,
1422 								  rtt);
1423 				}
1424 			}
1425 
1426 			/* If the chunk hasn't been marked as ACKED,
1427 			 * mark it and account bytes_acked if the
1428 			 * chunk had a valid transport (it will not
1429 			 * have a transport if ASCONF had deleted it
1430 			 * while DATA was outstanding).
1431 			 */
1432 			if (!tchunk->tsn_gap_acked) {
1433 				tchunk->tsn_gap_acked = 1;
1434 				*highest_new_tsn_in_sack = tsn;
1435 				bytes_acked += sctp_data_size(tchunk);
1436 				if (!tchunk->transport)
1437 					migrate_bytes += sctp_data_size(tchunk);
1438 			}
1439 
1440 			if (TSN_lte(tsn, sack_ctsn)) {
1441 				/* RFC 2960  6.3.2 Retransmission Timer Rules
1442 				 *
1443 				 * R3) Whenever a SACK is received
1444 				 * that acknowledges the DATA chunk
1445 				 * with the earliest outstanding TSN
1446 				 * for that address, restart T3-rtx
1447 				 * timer for that address with its
1448 				 * current RTO.
1449 				 */
1450 				restart_timer = 1;
1451 
1452 				if (!tchunk->tsn_gap_acked) {
1453 					/*
1454 					 * SFR-CACC algorithm:
1455 					 * 2) If the SACK contains gap acks
1456 					 * and the flag CHANGEOVER_ACTIVE is
1457 					 * set the receiver of the SACK MUST
1458 					 * take the following action:
1459 					 *
1460 					 * B) For each TSN t being acked that
1461 					 * has not been acked in any SACK so
1462 					 * far, set cacc_saw_newack to 1 for
1463 					 * the destination that the TSN was
1464 					 * sent to.
1465 					 */
1466 					if (transport &&
1467 					    sack->num_gap_ack_blocks &&
1468 					    q->asoc->peer.primary_path->cacc.
1469 					    changeover_active)
1470 						transport->cacc.cacc_saw_newack
1471 							= 1;
1472 				}
1473 
1474 				list_add_tail(&tchunk->transmitted_list,
1475 					      &q->sacked);
1476 			} else {
1477 				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1478 				 * M2) Each time a SACK arrives reporting
1479 				 * 'Stray DATA chunk(s)' record the highest TSN
1480 				 * reported as newly acknowledged, call this
1481 				 * value 'HighestTSNinSack'. A newly
1482 				 * acknowledged DATA chunk is one not
1483 				 * previously acknowledged in a SACK.
1484 				 *
1485 				 * When the SCTP sender of data receives a SACK
1486 				 * chunk that acknowledges, for the first time,
1487 				 * the receipt of a DATA chunk, all the still
1488 				 * unacknowledged DATA chunks whose TSN is
1489 				 * older than that newly acknowledged DATA
1490 				 * chunk, are qualified as 'Stray DATA chunks'.
1491 				 */
1492 				list_add_tail(lchunk, &tlist);
1493 			}
1494 
1495 #if SCTP_DEBUG
1496 			switch (dbg_prt_state) {
1497 			case 0:	/* last TSN was ACKed */
1498 				if (dbg_last_ack_tsn + 1 == tsn) {
1499 					/* This TSN belongs to the
1500 					 * current ACK range.
1501 					 */
1502 					break;
1503 				}
1504 
1505 				if (dbg_last_ack_tsn != dbg_ack_tsn) {
1506 					/* Display the end of the
1507 					 * current range.
1508 					 */
1509 					SCTP_DEBUG_PRINTK_CONT("-%08x",
1510 							       dbg_last_ack_tsn);
1511 				}
1512 
1513 				/* Start a new range.  */
1514 				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
1515 				dbg_ack_tsn = tsn;
1516 				break;
1517 
1518 			case 1:	/* The last TSN was NOT ACKed. */
1519 				if (dbg_last_kept_tsn != dbg_kept_tsn) {
1520 					/* Display the end of current range. */
1521 					SCTP_DEBUG_PRINTK_CONT("-%08x",
1522 							       dbg_last_kept_tsn);
1523 				}
1524 
1525 				SCTP_DEBUG_PRINTK_CONT("\n");
1526 
1527 				/* FALL THROUGH... */
1528 			default:
1529 				/* This is the first-ever TSN we examined.  */
1530 				/* Start a new range of ACK-ed TSNs.  */
1531 				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
1532 				dbg_prt_state = 0;
1533 				dbg_ack_tsn = tsn;
1534 			}
1535 
1536 			dbg_last_ack_tsn = tsn;
1537 #endif /* SCTP_DEBUG */
1538 
1539 		} else {
1540 			if (tchunk->tsn_gap_acked) {
1541 				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
1542 						  "data TSN: 0x%x\n",
1543 						  __func__,
1544 						  tsn);
1545 				tchunk->tsn_gap_acked = 0;
1546 
1547 				if (tchunk->transport)
1548 					bytes_acked -= sctp_data_size(tchunk);
1549 
1550 				/* RFC 2960 6.3.2 Retransmission Timer Rules
1551 				 *
1552 				 * R4) Whenever a SACK is received missing a
1553 				 * TSN that was previously acknowledged via a
1554 				 * Gap Ack Block, start T3-rtx for the
1555 				 * destination address to which the DATA
1556 				 * chunk was originally
1557 				 * transmitted if it is not already running.
1558 				 */
1559 				restart_timer = 1;
1560 			}
1561 
1562 			list_add_tail(lchunk, &tlist);
1563 
1564 #if SCTP_DEBUG
1565 			/* See the above comments on ACK-ed TSNs. */
1566 			switch (dbg_prt_state) {
1567 			case 1:
1568 				if (dbg_last_kept_tsn + 1 == tsn)
1569 					break;
1570 
1571 				if (dbg_last_kept_tsn != dbg_kept_tsn)
1572 					SCTP_DEBUG_PRINTK_CONT("-%08x",
1573 							       dbg_last_kept_tsn);
1574 
1575 				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
1576 				dbg_kept_tsn = tsn;
1577 				break;
1578 
1579 			case 0:
1580 				if (dbg_last_ack_tsn != dbg_ack_tsn)
1581 					SCTP_DEBUG_PRINTK_CONT("-%08x",
1582 							       dbg_last_ack_tsn);
1583 				SCTP_DEBUG_PRINTK_CONT("\n");
1584 
1585 				/* FALL THROUGH... */
1586 			default:
1587 				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
1588 				dbg_prt_state = 1;
1589 				dbg_kept_tsn = tsn;
1590 			}
1591 
1592 			dbg_last_kept_tsn = tsn;
1593 #endif /* SCTP_DEBUG */
1594 		}
1595 	}
1596 
1597 #if SCTP_DEBUG
1598 	/* Finish off the last range, displaying its ending TSN.  */
1599 	switch (dbg_prt_state) {
1600 	case 0:
1601 		if (dbg_last_ack_tsn != dbg_ack_tsn) {
1602 			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
1603 		} else {
1604 			SCTP_DEBUG_PRINTK_CONT("\n");
1605 		}
1606 	break;
1607 
1608 	case 1:
1609 		if (dbg_last_kept_tsn != dbg_kept_tsn) {
1610 			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
1611 		} else {
1612 			SCTP_DEBUG_PRINTK_CONT("\n");
1613 		}
1614 	}
1615 #endif /* SCTP_DEBUG */
1616 	if (transport) {
1617 		if (bytes_acked) {
1618 			struct sctp_association *asoc = transport->asoc;
1619 
1620 			/* We may have counted DATA that was migrated
1621 			 * to this transport due to a DEL-IP operation.
1622 			 * Subtract those bytes, since they were never
1623 			 * sent on this transport and shouldn't be
1624 			 * credited to this transport.
1625 			 */
1626 			bytes_acked -= migrate_bytes;
1627 
1628 			/* 8.2. When an outstanding TSN is acknowledged,
1629 			 * the endpoint shall clear the error counter of
1630 			 * the destination transport address to which the
1631 			 * DATA chunk was last sent.
1632 			 * The association's overall error counter is
1633 			 * also cleared.
1634 			 */
1635 			transport->error_count = 0;
1636 			transport->asoc->overall_error_count = 0;
1637 
1638 			/*
1639 			 * While in SHUTDOWN PENDING, we may have started
1640 			 * the T5 shutdown guard timer after reaching the
1641 			 * retransmission limit. Stop that timer as soon
1642 			 * as the receiver acknowledged any data.
1643 			 */
1644 			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1645 			    del_timer(&asoc->timers
1646 				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1647 					sctp_association_put(asoc);
1648 
1649 			/* Mark the destination transport address as
1650 			 * active if it is not so marked.
1651 			 */
1652 			if ((transport->state == SCTP_INACTIVE ||
1653 			     transport->state == SCTP_UNCONFIRMED) &&
1654 			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1655 				sctp_assoc_control_transport(
1656 					transport->asoc,
1657 					transport,
1658 					SCTP_TRANSPORT_UP,
1659 					SCTP_RECEIVED_SACK);
1660 			}
1661 
1662 			sctp_transport_raise_cwnd(transport, sack_ctsn,
1663 						  bytes_acked);
1664 
1665 			transport->flight_size -= bytes_acked;
1666 			if (transport->flight_size == 0)
1667 				transport->partial_bytes_acked = 0;
1668 			q->outstanding_bytes -= bytes_acked + migrate_bytes;
1669 		} else {
1670 			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
1671 			 * When a sender is doing zero window probing, it
1672 			 * should not timeout the association if it continues
1673 			 * to receive new packets from the receiver. The
1674 			 * reason is that the receiver MAY keep its window
1675 			 * closed for an indefinite time.
1676 			 * A sender is doing zero window probing when the
1677 			 * receiver's advertised window is zero, and there is
1678 			 * only one data chunk in flight to the receiver.
1679 			 *
1680 			 * Allow the association to timeout while in SHUTDOWN
1681 			 * PENDING or SHUTDOWN RECEIVED in case the receiver
1682 			 * stays in zero window mode forever.
1683 			 */
1684 			if (!q->asoc->peer.rwnd &&
1685 			    !list_empty(&tlist) &&
1686 			    (sack_ctsn + 2 == q->asoc->next_tsn) &&
1687 			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1688 				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
1689 						  "window probe: %u\n",
1690 						  __func__, sack_ctsn);
1691 				q->asoc->overall_error_count = 0;
1692 				transport->error_count = 0;
1693 			}
1694 		}
1695 
1696 		/* RFC 2960 6.3.2 Retransmission Timer Rules
1697 		 *
1698 		 * R2) Whenever all outstanding data sent to an address have
1699 		 * been acknowledged, turn off the T3-rtx timer of that
1700 		 * address.
1701 		 */
1702 		if (!transport->flight_size) {
1703 			if (timer_pending(&transport->T3_rtx_timer) &&
1704 			    del_timer(&transport->T3_rtx_timer)) {
1705 				sctp_transport_put(transport);
1706 			}
1707 		} else if (restart_timer) {
1708 			if (!mod_timer(&transport->T3_rtx_timer,
1709 				       jiffies + transport->rto))
1710 				sctp_transport_hold(transport);
1711 		}
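		/* Note: the transport refcount above mirrors the timer
		 * state.  mod_timer() returns 0 only when the timer was
		 * not already pending, so a reference is taken for the
		 * freshly armed timer; del_timer() returns nonzero only
		 * when it stopped a pending timer, so the matching
		 * reference is dropped.  The same pairing protects the
		 * T5 shutdown guard timer handled earlier.
		 */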
1712 	}
1713 
1714 	list_splice(&tlist, transmitted_queue);
1715 }
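
/* Illustrative sketch (not part of the build): the zero window probe
 * test above reduces to "the peer advertised a closed window and
 * exactly one DATA chunk is in flight".  If the association's next_tsn
 * is N, the lone outstanding chunk carries TSN N - 1, so a SACK whose
 * cumulative ack is N - 2 acknowledges everything but the probe; that
 * is the sack_ctsn + 2 == next_tsn check.  The helper below is a
 * hypothetical standalone restatement of the condition.
 */
#if 0
static int sctp_is_zero_window_probe(__u32 peer_rwnd, __u32 sack_ctsn,
				     __u32 next_tsn)
{
	return peer_rwnd == 0 && sack_ctsn + 2 == next_tsn;
}
#endif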
1716 
1717 /* Mark chunks as missing; consequently, they may get retransmitted. */
1718 static void sctp_mark_missing(struct sctp_outq *q,
1719 			      struct list_head *transmitted_queue,
1720 			      struct sctp_transport *transport,
1721 			      __u32 highest_new_tsn_in_sack,
1722 			      int count_of_newacks)
1723 {
1724 	struct sctp_chunk *chunk;
1725 	__u32 tsn;
1726 	char do_fast_retransmit = 0;
1727 	struct sctp_association *asoc = q->asoc;
1728 	struct sctp_transport *primary = asoc->peer.primary_path;
1729 
1730 	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1731 
1732 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1733 
1734 		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1735 		 * 'Unacknowledged TSN's', if the TSN number of an
1736 		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1737 		 * value, increment the 'TSN.Missing.Report' count on that
1738 		 * chunk if it has NOT been fast retransmitted or marked for
1739 		 * fast retransmit already.
1740 		 */
1741 		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1742 		    !chunk->tsn_gap_acked &&
1743 		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1744 
1745 			/* SFR-CACC may require us to skip marking
1746 			 * this chunk as missing.
1747 			 */
1748 			if (!transport || !sctp_cacc_skip(primary,
1749 						chunk->transport,
1750 						count_of_newacks, tsn)) {
1751 				chunk->tsn_missing_report++;
1752 
1753 				SCTP_DEBUG_PRINTK(
1754 					"%s: TSN 0x%x missing counter: %d\n",
1755 					__func__, tsn,
1756 					chunk->tsn_missing_report);
1757 			}
1758 		}
1759 		/*
1760 		 * M4) If any DATA chunk is found to have a
1761 		 * 'TSN.Missing.Report' value larger than or equal to 3,
1762 		 * mark that chunk for retransmission and start the fast
1763 		 * retransmit procedure.
1764 		 */
1765 
1766 		if (chunk->tsn_missing_report >= 3) {
1767 			chunk->fast_retransmit = SCTP_NEED_FRTX;
1768 			do_fast_retransmit = 1;
1769 		}
1770 	}
1771 
1772 	if (transport) {
1773 		if (do_fast_retransmit)
1774 			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1775 
1776 		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
1777 				  "ssthresh: %d, flight_size: %d, pba: %d\n",
1778 				  __func__, transport, transport->cwnd,
1779 				  transport->ssthresh, transport->flight_size,
1780 				  transport->partial_bytes_acked);
1781 	}
1782 }
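
/* Illustrative sketch (not part of the build): rules M3/M4 above
 * reduce to a per-chunk counter.  A chunk that remains unacknowledged
 * while the SACK shows newer TSNs arriving has its missing-report
 * count bumped; at three reports it is marked for fast retransmit.
 * A hypothetical flattened restatement, ignoring the SFR-CACC skip:
 */
#if 0
static int sctp_bump_missing_report(__u32 tsn, __u32 highest_new_tsn,
				    int gap_acked, __u8 *missing_report)
{
	if (!gap_acked && TSN_lt(tsn, highest_new_tsn))
		(*missing_report)++;
	return *missing_report >= 3;	/* caller triggers one fast rtx */
}
#endif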
1783 
1784 /* Is the given TSN acked by this packet?  */
1785 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1786 {
1787 	int i;
1788 	sctp_sack_variable_t *frags;
1789 	__u16 gap;
1790 	__u32 ctsn = ntohl(sack->cum_tsn_ack);
1791 
1792 	if (TSN_lte(tsn, ctsn))
1793 		goto pass;
1794 
1795 	/* 3.3.4 Selective Acknowledgement (SACK) (3):
1796 	 *
1797 	 * Gap Ack Blocks:
1798 	 *  These fields contain the Gap Ack Blocks. They are repeated
1799 	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
1800 	 *  defined in the Number of Gap Ack Blocks field. All DATA
1801 	 *  chunks with TSNs greater than or equal to (Cumulative TSN
1802 	 *  Ack + Gap Ack Block Start) and less than or equal to
1803 	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1804 	 *  Block are assumed to have been received correctly.
1805 	 */
1806 
1807 	frags = sack->variable;
1808 	gap = tsn - ctsn;
1809 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
1810 		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
1811 		    TSN_lte(gap, ntohs(frags[i].gab.end)))
1812 			goto pass;
1813 	}
1814 
1815 	return 0;
1816 pass:
1817 	return 1;
1818 }
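
/* Worked example for the gap ack arithmetic above: with
 * cum_tsn_ack = 100 and a single Gap Ack Block of start = 2, end = 4,
 * the peer holds TSNs 102..104.  For tsn = 103 the offset is
 * gap = 103 - 100 = 3, and 2 <= 3 <= 4, so sctp_acked() returns 1.
 * For tsn = 101 the offset is 1, outside the block, so it returns 0.
 */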
1819 
1820 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1821 				    int nskips, __be16 stream)
1822 {
1823 	int i;
1824 
1825 	for (i = 0; i < nskips; i++) {
1826 		if (skiplist[i].stream == stream)
1827 			return i;
1828 	}
1829 	return i;
1830 }
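
/* Usage note: sctp_get_skip_pos() returns the index of an existing
 * entry for @stream, or @nskips when the stream is not yet listed.
 * The caller below relies on this: storing at the returned slot either
 * refreshes the stream's highest skipped SSN in place or appends a new
 * entry, in which case nskips is incremented.
 */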
1831 
1832 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1833 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1834 {
1835 	struct sctp_association *asoc = q->asoc;
1836 	struct sctp_chunk *ftsn_chunk = NULL;
1837 	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1838 	int nskips = 0;
1839 	int skip_pos = 0;
1840 	__u32 tsn;
1841 	struct sctp_chunk *chunk;
1842 	struct list_head *lchunk, *temp;
1843 
1844 	if (!asoc->peer.prsctp_capable)
1845 		return;
1846 
1847 	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1848 	 * received SACK.
1849 	 *
1850 	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1851 	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1852 	 */
1853 	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1854 		asoc->adv_peer_ack_point = ctsn;
1855 
1856 	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1857 	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1858 	 * the chunk next in the out-queue space is marked as "abandoned" as
1859 	 * shown in the following example:
1860 	 *
1861 	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1862 	 * and the Advanced.Peer.Ack.Point is updated to this value:
1863 	 *
1864 	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1865 	 *   normal SACK processing           local advancement
1866 	 *                ...                           ...
1867 	 *   Adv.Ack.Pt-> 102 acked                     102 acked
1868 	 *                103 abandoned                 103 abandoned
1869 	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
1870 	 *                105                           105
1871 	 *                106 acked                     106 acked
1872 	 *                ...                           ...
1873 	 *
1874 	 * In this example, the data sender successfully advanced the
1875 	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1876 	 */
1877 	list_for_each_safe(lchunk, temp, &q->abandoned) {
1878 		chunk = list_entry(lchunk, struct sctp_chunk,
1879 					transmitted_list);
1880 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1881 
1882 		/* Remove any chunks in the abandoned queue that are acked by
1883 		 * the ctsn.
1884 		 */
1885 		if (TSN_lte(tsn, ctsn)) {
1886 			list_del_init(lchunk);
1887 			sctp_chunk_free(chunk);
1888 		} else {
1889 			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
1890 				asoc->adv_peer_ack_point = tsn;
1891 				if (chunk->chunk_hdr->flags &
1892 					 SCTP_DATA_UNORDERED)
1893 					continue;
1894 				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1895 						nskips,
1896 						chunk->subh.data_hdr->stream);
1897 				ftsn_skip_arr[skip_pos].stream =
1898 					chunk->subh.data_hdr->stream;
1899 				ftsn_skip_arr[skip_pos].ssn =
1900 					 chunk->subh.data_hdr->ssn;
1901 				if (skip_pos == nskips)
1902 					nskips++;
1903 				if (nskips == ARRAY_SIZE(ftsn_skip_arr))
1904 					break;
1905 			} else {
1906 				break;
1907 			}
1907 		}
1908 	}
1909 
1910 	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1911 	 * is greater than the Cumulative TSN ACK carried in the received
1912 	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1913 	 * chunk containing the latest value of the
1914 	 * "Advanced.Peer.Ack.Point".
1915 	 *
1916 	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1917 	 * list each stream and sequence number in the forwarded TSN. This
1918 	 * information will enable the receiver to easily find any
1919 	 * stranded TSNs waiting on stream reorder queues. Each stream
1920 	 * SHOULD only be reported once; this means that if multiple
1921 	 * abandoned messages occur in the same stream then only the
1922 	 * highest abandoned stream sequence number is reported. If the
1923 	 * total size of the FORWARD TSN does NOT fit in a single MTU then
1924 	 * the sender of the FORWARD TSN SHOULD lower the
1925 	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1926 	 * single MTU.
1927 	 */
1928 	if (asoc->adv_peer_ack_point > ctsn)
1929 		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1930 					      nskips, &ftsn_skip_arr[0]);
1931 
1932 	if (ftsn_chunk) {
1933 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1934 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1935 	}
1936 }
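
/* Illustrative sketch (not part of the build): the C2 walk above
 * advances the ack point across the run of abandoned TSNs that begins
 * immediately after it.  Assuming a sorted, duplicate-free array of
 * abandoned TSNs (a hypothetical flattening of q->abandoned, with the
 * per-stream skip list handling omitted), the advancement is:
 */
#if 0
static __u32 sctp_adv_ack_point_sketch(__u32 adv, const __u32 *abandoned,
				       int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (abandoned[i] != adv + 1)
			break;		/* gap: a TSN is still in flight */
		adv = abandoned[i];	/* contiguous: keep advancing */
	}
	return adv;
}
#endif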
1937