xref: /openbmc/linux/net/sctp/outqueue.c (revision e5f612969c6f965e3bd1158598e0a3b1c4f389b9)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001-2003 Intel Corp.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions implement the sctp_outq class.   The outqueue handles
10  * bundling and queueing of outgoing SCTP chunks.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Perry Melange         <pmelange@null.cc.uic.edu>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang 	    <hui.huang@nokia.com>
38  *    Sridhar Samudrala     <sri@us.ibm.com>
39  *    Jon Grimm             <jgrimm@us.ibm.com>
40  */
41 
42 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43 
44 #include <linux/types.h>
45 #include <linux/list.h>   /* For struct list_head */
46 #include <linux/socket.h>
47 #include <linux/ip.h>
48 #include <linux/slab.h>
49 #include <net/sock.h>	  /* For skb_set_owner_w */
50 
51 #include <net/sctp/sctp.h>
52 #include <net/sctp/sm.h>
53 #include <net/sctp/stream_sched.h>
54 
55 /* Declare internal functions here.  */
56 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
57 static void sctp_check_transmitted(struct sctp_outq *q,
58 				   struct list_head *transmitted_queue,
59 				   struct sctp_transport *transport,
60 				   union sctp_addr *saddr,
61 				   struct sctp_sackhdr *sack,
62 				   __u32 *highest_new_tsn);
63 
64 static void sctp_mark_missing(struct sctp_outq *q,
65 			      struct list_head *transmitted_queue,
66 			      struct sctp_transport *transport,
67 			      __u32 highest_new_tsn,
68 			      int count_of_newacks);
69 
70 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
71 
72 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
73 
74 /* Add data to the front of the queue. */
75 static inline void sctp_outq_head_data(struct sctp_outq *q,
76 				       struct sctp_chunk *ch)
77 {
78 	struct sctp_stream_out_ext *oute;
79 	__u16 stream;
80 
81 	list_add(&ch->list, &q->out_chunk_list);
82 	q->out_qlen += ch->skb->len;
83 
84 	stream = sctp_chunk_stream_no(ch);
85 	oute = q->asoc->stream.out[stream].ext;
86 	list_add(&ch->stream_list, &oute->outq);
87 }
88 
89 /* Take data from the front of the queue. */
90 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
91 {
92 	return q->sched->dequeue(q);
93 }
94 
95 /* Add data chunk to the end of the queue. */
96 static inline void sctp_outq_tail_data(struct sctp_outq *q,
97 				       struct sctp_chunk *ch)
98 {
99 	struct sctp_stream_out_ext *oute;
100 	__u16 stream;
101 
102 	list_add_tail(&ch->list, &q->out_chunk_list);
103 	q->out_qlen += ch->skb->len;
104 
105 	stream = sctp_chunk_stream_no(ch);
106 	oute = q->asoc->stream.out[stream].ext;
107 	list_add_tail(&ch->stream_list, &oute->outq);
108 }
109 
110 /*
111  * SFR-CACC algorithm:
112  * D) If count_of_newacks is greater than or equal to 2
113  * and t was not sent to the current primary then the
114  * sender MUST NOT increment missing report count for t.
115  */
116 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
117 				       struct sctp_transport *transport,
118 				       int count_of_newacks)
119 {
120 	if (count_of_newacks >= 2 && transport != primary)
121 		return 1;
122 	return 0;
123 }
124 
125 /*
126  * SFR-CACC algorithm:
127  * F) If count_of_newacks is less than 2, let d be the
128  * destination to which t was sent. If cacc_saw_newack
129  * is 0 for destination d, then the sender MUST NOT
130  * increment missing report count for t.
131  */
132 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
133 				       int count_of_newacks)
134 {
135 	if (count_of_newacks < 2 &&
136 			(transport && !transport->cacc.cacc_saw_newack))
137 		return 1;
138 	return 0;
139 }
140 
141 /*
142  * SFR-CACC algorithm:
143  * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
144  * execute steps C, D, F.
145  *
146  * C has been implemented in sctp_outq_sack
147  */
148 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
149 				     struct sctp_transport *transport,
150 				     int count_of_newacks)
151 {
152 	if (!primary->cacc.cycling_changeover) {
153 		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
154 			return 1;
155 		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
156 			return 1;
157 		return 0;
158 	}
159 	return 0;
160 }
161 
162 /*
163  * SFR-CACC algorithm:
164  * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
165  * than next_tsn_at_change of the current primary, then
166  * the sender MUST NOT increment missing report count
167  * for t.
168  */
169 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
170 {
171 	if (primary->cacc.cycling_changeover &&
172 	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
173 		return 1;
174 	return 0;
175 }
176 
177 /*
178  * SFR-CACC algorithm:
179  * 3) If the missing report count for TSN t is to be
180  * incremented according to [RFC2960] and
181  * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
182  * then the sender MUST further execute steps 3.1 and
183  * 3.2 to determine if the missing report count for
184  * TSN t SHOULD NOT be incremented.
185  *
186  * 3.3) If 3.1 and 3.2 do not dictate that the missing
187  * report count for t should not be incremented, then
188  * the sender SHOULD increment missing report count for
189  * t (according to [RFC2960] and [SCTP_STEWART_2002]).
190  */
191 static inline int sctp_cacc_skip(struct sctp_transport *primary,
192 				 struct sctp_transport *transport,
193 				 int count_of_newacks,
194 				 __u32 tsn)
195 {
196 	if (primary->cacc.changeover_active &&
197 	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
198 	     sctp_cacc_skip_3_2(primary, tsn)))
199 		return 1;
200 	return 0;
201 }
202 
203 /* Initialize an existing sctp_outq.  This does the boring stuff.
204  * You still need to define handlers if you really want to DO
205  * something with this structure...
206  */
207 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 {
209 	memset(q, 0, sizeof(struct sctp_outq));
210 
211 	q->asoc = asoc;
212 	INIT_LIST_HEAD(&q->out_chunk_list);
213 	INIT_LIST_HEAD(&q->control_chunk_list);
214 	INIT_LIST_HEAD(&q->retransmit);
215 	INIT_LIST_HEAD(&q->sacked);
216 	INIT_LIST_HEAD(&q->abandoned);
217 	sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
218 }
219 
220 /* Free the outqueue structure and any related pending chunks.
221  */
222 static void __sctp_outq_teardown(struct sctp_outq *q)
223 {
224 	struct sctp_transport *transport;
225 	struct list_head *lchunk, *temp;
226 	struct sctp_chunk *chunk, *tmp;
227 
228 	/* Throw away unacknowledged chunks. */
229 	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
230 			transports) {
231 		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
232 			chunk = list_entry(lchunk, struct sctp_chunk,
233 					   transmitted_list);
234 			/* Mark as part of a failed message. */
235 			sctp_chunk_fail(chunk, q->error);
236 			sctp_chunk_free(chunk);
237 		}
238 	}
239 
240 	/* Throw away chunks that have been gap ACKed.  */
241 	list_for_each_safe(lchunk, temp, &q->sacked) {
242 		list_del_init(lchunk);
243 		chunk = list_entry(lchunk, struct sctp_chunk,
244 				   transmitted_list);
245 		sctp_chunk_fail(chunk, q->error);
246 		sctp_chunk_free(chunk);
247 	}
248 
249 	/* Throw away any chunks in the retransmit queue. */
250 	list_for_each_safe(lchunk, temp, &q->retransmit) {
251 		list_del_init(lchunk);
252 		chunk = list_entry(lchunk, struct sctp_chunk,
253 				   transmitted_list);
254 		sctp_chunk_fail(chunk, q->error);
255 		sctp_chunk_free(chunk);
256 	}
257 
258 	/* Throw away any chunks that are in the abandoned queue. */
259 	list_for_each_safe(lchunk, temp, &q->abandoned) {
260 		list_del_init(lchunk);
261 		chunk = list_entry(lchunk, struct sctp_chunk,
262 				   transmitted_list);
263 		sctp_chunk_fail(chunk, q->error);
264 		sctp_chunk_free(chunk);
265 	}
266 
267 	/* Throw away any leftover data chunks. */
268 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
269 		sctp_sched_dequeue_done(q, chunk);
270 
271 		/* Mark as send failure. */
272 		sctp_chunk_fail(chunk, q->error);
273 		sctp_chunk_free(chunk);
274 	}
275 
276 	/* Throw away any leftover control chunks. */
277 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
278 		list_del_init(&chunk->list);
279 		sctp_chunk_free(chunk);
280 	}
281 }
282 
283 void sctp_outq_teardown(struct sctp_outq *q)
284 {
285 	__sctp_outq_teardown(q);
286 	sctp_outq_init(q->asoc, q);
287 }
288 
289 /* Free the outqueue structure and any related pending chunks.  */
290 void sctp_outq_free(struct sctp_outq *q)
291 {
292 	/* Throw away leftover chunks. */
293 	__sctp_outq_teardown(q);
294 }
295 
296 /* Put a new chunk in an sctp_outq.  */
297 void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
298 {
299 	struct net *net = sock_net(q->asoc->base.sk);
300 
301 	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
302 		 chunk && chunk->chunk_hdr ?
303 		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
304 		 "illegal chunk");
305 
306 	/* If it is data, queue it up, otherwise, send it
307 	 * immediately.
308 	 */
309 	if (sctp_chunk_is_data(chunk)) {
310 		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
311 			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
312 			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
313 			 "illegal chunk");
314 
315 		sctp_outq_tail_data(q, chunk);
316 		if (chunk->asoc->peer.prsctp_capable &&
317 		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
318 			chunk->asoc->sent_cnt_removable++;
319 		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
320 			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
321 		else
322 			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
323 	} else {
324 		list_add_tail(&chunk->list, &q->control_chunk_list);
325 		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
326 	}
327 
328 	if (!q->cork)
329 		sctp_outq_flush(q, 0, gfp);
330 }
331 
332 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
333  * and the abandoned list are in ascending order.
334  */
335 static void sctp_insert_list(struct list_head *head, struct list_head *new)
336 {
337 	struct list_head *pos;
338 	struct sctp_chunk *nchunk, *lchunk;
339 	__u32 ntsn, ltsn;
340 	int done = 0;
341 
342 	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
343 	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
344 
345 	list_for_each(pos, head) {
346 		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
347 		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
348 		if (TSN_lt(ntsn, ltsn)) {
349 			list_add(new, pos->prev);
350 			done = 1;
351 			break;
352 		}
353 	}
354 	if (!done)
355 		list_add_tail(new, head);
356 }
357 
358 static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
359 				  struct sctp_sndrcvinfo *sinfo,
360 				  struct list_head *queue, int msg_len)
361 {
362 	struct sctp_chunk *chk, *temp;
363 
364 	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
365 		struct sctp_stream_out *streamout;
366 
367 		if (!chk->msg->abandoned &&
368 		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
369 		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
370 			continue;
371 
372 		chk->msg->abandoned = 1;
373 		list_del_init(&chk->transmitted_list);
374 		sctp_insert_list(&asoc->outqueue.abandoned,
375 				 &chk->transmitted_list);
376 
377 		streamout = &asoc->stream.out[chk->sinfo.sinfo_stream];
378 		asoc->sent_cnt_removable--;
379 		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
380 		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
381 
382 		if (queue != &asoc->outqueue.retransmit &&
383 		    !chk->tsn_gap_acked) {
384 			if (chk->transport)
385 				chk->transport->flight_size -=
386 						sctp_data_size(chk);
387 			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
388 		}
389 
390 		msg_len -= SCTP_DATA_SNDSIZE(chk) +
391 			   sizeof(struct sk_buff) +
392 			   sizeof(struct sctp_chunk);
393 		if (msg_len <= 0)
394 			break;
395 	}
396 
397 	return msg_len;
398 }
399 
400 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
401 				    struct sctp_sndrcvinfo *sinfo, int msg_len)
402 {
403 	struct sctp_outq *q = &asoc->outqueue;
404 	struct sctp_chunk *chk, *temp;
405 
406 	q->sched->unsched_all(&asoc->stream);
407 
408 	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
409 		if (!chk->msg->abandoned &&
410 		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
411 		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
412 			continue;
413 
414 		chk->msg->abandoned = 1;
415 		sctp_sched_dequeue_common(q, chk);
416 		asoc->sent_cnt_removable--;
417 		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
418 		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
419 			struct sctp_stream_out *streamout =
420 				&asoc->stream.out[chk->sinfo.sinfo_stream];
421 
422 			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
423 		}
424 
425 		msg_len -= SCTP_DATA_SNDSIZE(chk) +
426 			   sizeof(struct sk_buff) +
427 			   sizeof(struct sctp_chunk);
428 		sctp_chunk_free(chk);
429 		if (msg_len <= 0)
430 			break;
431 	}
432 
433 	q->sched->sched_all(&asoc->stream);
434 
435 	return msg_len;
436 }
437 
438 /* Abandon the chunks according their priorities */
439 void sctp_prsctp_prune(struct sctp_association *asoc,
440 		       struct sctp_sndrcvinfo *sinfo, int msg_len)
441 {
442 	struct sctp_transport *transport;
443 
444 	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
445 		return;
446 
447 	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
448 					 &asoc->outqueue.retransmit,
449 					 msg_len);
450 	if (msg_len <= 0)
451 		return;
452 
453 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
454 			    transports) {
455 		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
456 						 &transport->transmitted,
457 						 msg_len);
458 		if (msg_len <= 0)
459 			return;
460 	}
461 
462 	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
463 }
464 
465 /* Mark all the eligible packets on a transport for retransmission.  */
466 void sctp_retransmit_mark(struct sctp_outq *q,
467 			  struct sctp_transport *transport,
468 			  __u8 reason)
469 {
470 	struct list_head *lchunk, *ltemp;
471 	struct sctp_chunk *chunk;
472 
473 	/* Walk through the specified transmitted queue.  */
474 	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
475 		chunk = list_entry(lchunk, struct sctp_chunk,
476 				   transmitted_list);
477 
478 		/* If the chunk is abandoned, move it to abandoned list. */
479 		if (sctp_chunk_abandoned(chunk)) {
480 			list_del_init(lchunk);
481 			sctp_insert_list(&q->abandoned, lchunk);
482 
483 			/* If this chunk has not been previousely acked,
484 			 * stop considering it 'outstanding'.  Our peer
485 			 * will most likely never see it since it will
486 			 * not be retransmitted
487 			 */
488 			if (!chunk->tsn_gap_acked) {
489 				if (chunk->transport)
490 					chunk->transport->flight_size -=
491 							sctp_data_size(chunk);
492 				q->outstanding_bytes -= sctp_data_size(chunk);
493 				q->asoc->peer.rwnd += sctp_data_size(chunk);
494 			}
495 			continue;
496 		}
497 
498 		/* If we are doing  retransmission due to a timeout or pmtu
499 		 * discovery, only the  chunks that are not yet acked should
500 		 * be added to the retransmit queue.
501 		 */
502 		if ((reason == SCTP_RTXR_FAST_RTX  &&
503 			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
504 		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
505 			/* RFC 2960 6.2.1 Processing a Received SACK
506 			 *
507 			 * C) Any time a DATA chunk is marked for
508 			 * retransmission (via either T3-rtx timer expiration
509 			 * (Section 6.3.3) or via fast retransmit
510 			 * (Section 7.2.4)), add the data size of those
511 			 * chunks to the rwnd.
512 			 */
513 			q->asoc->peer.rwnd += sctp_data_size(chunk);
514 			q->outstanding_bytes -= sctp_data_size(chunk);
515 			if (chunk->transport)
516 				transport->flight_size -= sctp_data_size(chunk);
517 
518 			/* sctpimpguide-05 Section 2.8.2
519 			 * M5) If a T3-rtx timer expires, the
520 			 * 'TSN.Missing.Report' of all affected TSNs is set
521 			 * to 0.
522 			 */
523 			chunk->tsn_missing_report = 0;
524 
525 			/* If a chunk that is being used for RTT measurement
526 			 * has to be retransmitted, we cannot use this chunk
527 			 * anymore for RTT measurements. Reset rto_pending so
528 			 * that a new RTT measurement is started when a new
529 			 * data chunk is sent.
530 			 */
531 			if (chunk->rtt_in_progress) {
532 				chunk->rtt_in_progress = 0;
533 				transport->rto_pending = 0;
534 			}
535 
536 			/* Move the chunk to the retransmit queue. The chunks
537 			 * on the retransmit queue are always kept in order.
538 			 */
539 			list_del_init(lchunk);
540 			sctp_insert_list(&q->retransmit, lchunk);
541 		}
542 	}
543 
544 	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
545 		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
546 		 transport->cwnd, transport->ssthresh, transport->flight_size,
547 		 transport->partial_bytes_acked);
548 }
549 
550 /* Mark all the eligible packets on a transport for retransmission and force
551  * one packet out.
552  */
553 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
554 		     enum sctp_retransmit_reason reason)
555 {
556 	struct net *net = sock_net(q->asoc->base.sk);
557 
558 	switch (reason) {
559 	case SCTP_RTXR_T3_RTX:
560 		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
561 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
562 		/* Update the retran path if the T3-rtx timer has expired for
563 		 * the current retran path.
564 		 */
565 		if (transport == transport->asoc->peer.retran_path)
566 			sctp_assoc_update_retran_path(transport->asoc);
567 		transport->asoc->rtx_data_chunks +=
568 			transport->asoc->unack_data;
569 		break;
570 	case SCTP_RTXR_FAST_RTX:
571 		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
572 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
573 		q->fast_rtx = 1;
574 		break;
575 	case SCTP_RTXR_PMTUD:
576 		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
577 		break;
578 	case SCTP_RTXR_T1_RTX:
579 		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
580 		transport->asoc->init_retries++;
581 		break;
582 	default:
583 		BUG();
584 	}
585 
586 	sctp_retransmit_mark(q, transport, reason);
587 
588 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
589 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
590 	 * following the procedures outlined in C1 - C5.
591 	 */
592 	if (reason == SCTP_RTXR_T3_RTX)
593 		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
594 
595 	/* Flush the queues only on timeout, since fast_rtx is only
596 	 * triggered during sack processing and the queue
597 	 * will be flushed at the end.
598 	 */
599 	if (reason != SCTP_RTXR_FAST_RTX)
600 		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
601 }
602 
603 /*
604  * Transmit DATA chunks on the retransmit queue.  Upon return from
605  * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
606  * need to be transmitted by the caller.
607  * We assume that pkt->transport has already been set.
608  *
609  * The return value is a normal kernel error return value.
610  */
611 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
612 			       int rtx_timeout, int *start_timer)
613 {
614 	struct sctp_transport *transport = pkt->transport;
615 	struct sctp_chunk *chunk, *chunk1;
616 	struct list_head *lqueue;
617 	enum sctp_xmit status;
618 	int error = 0;
619 	int timer = 0;
620 	int done = 0;
621 	int fast_rtx;
622 
623 	lqueue = &q->retransmit;
624 	fast_rtx = q->fast_rtx;
625 
626 	/* This loop handles time-out retransmissions, fast retransmissions,
627 	 * and retransmissions due to opening of whindow.
628 	 *
629 	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
630 	 *
631 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
632 	 * outstanding DATA chunks for the address for which the
633 	 * T3-rtx has expired will fit into a single packet, subject
634 	 * to the MTU constraint for the path corresponding to the
635 	 * destination transport address to which the retransmission
636 	 * is being sent (this may be different from the address for
637 	 * which the timer expires [see Section 6.4]). Call this value
638 	 * K. Bundle and retransmit those K DATA chunks in a single
639 	 * packet to the destination endpoint.
640 	 *
641 	 * [Just to be painfully clear, if we are retransmitting
642 	 * because a timeout just happened, we should send only ONE
643 	 * packet of retransmitted data.]
644 	 *
645 	 * For fast retransmissions we also send only ONE packet.  However,
646 	 * if we are just flushing the queue due to open window, we'll
647 	 * try to send as much as possible.
648 	 */
649 	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
650 		/* If the chunk is abandoned, move it to abandoned list. */
651 		if (sctp_chunk_abandoned(chunk)) {
652 			list_del_init(&chunk->transmitted_list);
653 			sctp_insert_list(&q->abandoned,
654 					 &chunk->transmitted_list);
655 			continue;
656 		}
657 
658 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
659 		 * simple approach is just to move such TSNs out of the
660 		 * way and into a 'transmitted' queue and skip to the
661 		 * next chunk.
662 		 */
663 		if (chunk->tsn_gap_acked) {
664 			list_move_tail(&chunk->transmitted_list,
665 				       &transport->transmitted);
666 			continue;
667 		}
668 
669 		/* If we are doing fast retransmit, ignore non-fast_rtransmit
670 		 * chunks
671 		 */
672 		if (fast_rtx && !chunk->fast_retransmit)
673 			continue;
674 
675 redo:
676 		/* Attempt to append this chunk to the packet. */
677 		status = sctp_packet_append_chunk(pkt, chunk);
678 
679 		switch (status) {
680 		case SCTP_XMIT_PMTU_FULL:
681 			if (!pkt->has_data && !pkt->has_cookie_echo) {
682 				/* If this packet did not contain DATA then
683 				 * retransmission did not happen, so do it
684 				 * again.  We'll ignore the error here since
685 				 * control chunks are already freed so there
686 				 * is nothing we can do.
687 				 */
688 				sctp_packet_transmit(pkt, GFP_ATOMIC);
689 				goto redo;
690 			}
691 
692 			/* Send this packet.  */
693 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
694 
695 			/* If we are retransmitting, we should only
696 			 * send a single packet.
697 			 * Otherwise, try appending this chunk again.
698 			 */
699 			if (rtx_timeout || fast_rtx)
700 				done = 1;
701 			else
702 				goto redo;
703 
704 			/* Bundle next chunk in the next round.  */
705 			break;
706 
707 		case SCTP_XMIT_RWND_FULL:
708 			/* Send this packet. */
709 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
710 
711 			/* Stop sending DATA as there is no more room
712 			 * at the receiver.
713 			 */
714 			done = 1;
715 			break;
716 
717 		case SCTP_XMIT_DELAY:
718 			/* Send this packet. */
719 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
720 
721 			/* Stop sending DATA because of nagle delay. */
722 			done = 1;
723 			break;
724 
725 		default:
726 			/* The append was successful, so add this chunk to
727 			 * the transmitted list.
728 			 */
729 			list_move_tail(&chunk->transmitted_list,
730 				       &transport->transmitted);
731 
732 			/* Mark the chunk as ineligible for fast retransmit
733 			 * after it is retransmitted.
734 			 */
735 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
736 				chunk->fast_retransmit = SCTP_DONT_FRTX;
737 
738 			q->asoc->stats.rtxchunks++;
739 			break;
740 		}
741 
742 		/* Set the timer if there were no errors */
743 		if (!error && !timer)
744 			timer = 1;
745 
746 		if (done)
747 			break;
748 	}
749 
750 	/* If we are here due to a retransmit timeout or a fast
751 	 * retransmit and if there are any chunks left in the retransmit
752 	 * queue that could not fit in the PMTU sized packet, they need
753 	 * to be marked as ineligible for a subsequent fast retransmit.
754 	 */
755 	if (rtx_timeout || fast_rtx) {
756 		list_for_each_entry(chunk1, lqueue, transmitted_list) {
757 			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
758 				chunk1->fast_retransmit = SCTP_DONT_FRTX;
759 		}
760 	}
761 
762 	*start_timer = timer;
763 
764 	/* Clear fast retransmit hint */
765 	if (fast_rtx)
766 		q->fast_rtx = 0;
767 
768 	return error;
769 }
770 
771 /* Cork the outqueue so queued chunks are really queued. */
772 void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
773 {
774 	if (q->cork)
775 		q->cork = 0;
776 
777 	sctp_outq_flush(q, 0, gfp);
778 }
779 
780 
781 /*
782  * Try to flush an outqueue.
783  *
784  * Description: Send everything in q which we legally can, subject to
785  * congestion limitations.
786  * * Note: This function can be called from multiple contexts so appropriate
787  * locking concerns must be made.  Today we use the sock lock to protect
788  * this function.
789  */
790 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
791 {
792 	struct sctp_packet *packet;
793 	struct sctp_packet singleton;
794 	struct sctp_association *asoc = q->asoc;
795 	__u16 sport = asoc->base.bind_addr.port;
796 	__u16 dport = asoc->peer.port;
797 	__u32 vtag = asoc->peer.i.init_tag;
798 	struct sctp_transport *transport = NULL;
799 	struct sctp_transport *new_transport;
800 	struct sctp_chunk *chunk, *tmp;
801 	enum sctp_xmit status;
802 	int error = 0;
803 	int start_timer = 0;
804 	int one_packet = 0;
805 
806 	/* These transports have chunks to send. */
807 	struct list_head transport_list;
808 	struct list_head *ltransport;
809 
810 	INIT_LIST_HEAD(&transport_list);
811 	packet = NULL;
812 
813 	/*
814 	 * 6.10 Bundling
815 	 *   ...
816 	 *   When bundling control chunks with DATA chunks, an
817 	 *   endpoint MUST place control chunks first in the outbound
818 	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
819 	 *   within a SCTP packet in increasing order of TSN.
820 	 *   ...
821 	 */
822 
823 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
824 		/* RFC 5061, 5.3
825 		 * F1) This means that until such time as the ASCONF
826 		 * containing the add is acknowledged, the sender MUST
827 		 * NOT use the new IP address as a source for ANY SCTP
828 		 * packet except on carrying an ASCONF Chunk.
829 		 */
830 		if (asoc->src_out_of_asoc_ok &&
831 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
832 			continue;
833 
834 		list_del_init(&chunk->list);
835 
836 		/* Pick the right transport to use. */
837 		new_transport = chunk->transport;
838 
839 		if (!new_transport) {
840 			/*
841 			 * If we have a prior transport pointer, see if
842 			 * the destination address of the chunk
843 			 * matches the destination address of the
844 			 * current transport.  If not a match, then
845 			 * try to look up the transport with a given
846 			 * destination address.  We do this because
847 			 * after processing ASCONFs, we may have new
848 			 * transports created.
849 			 */
850 			if (transport &&
851 			    sctp_cmp_addr_exact(&chunk->dest,
852 						&transport->ipaddr))
853 					new_transport = transport;
854 			else
855 				new_transport = sctp_assoc_lookup_paddr(asoc,
856 								&chunk->dest);
857 
858 			/* if we still don't have a new transport, then
859 			 * use the current active path.
860 			 */
861 			if (!new_transport)
862 				new_transport = asoc->peer.active_path;
863 		} else if ((new_transport->state == SCTP_INACTIVE) ||
864 			   (new_transport->state == SCTP_UNCONFIRMED) ||
865 			   (new_transport->state == SCTP_PF)) {
866 			/* If the chunk is Heartbeat or Heartbeat Ack,
867 			 * send it to chunk->transport, even if it's
868 			 * inactive.
869 			 *
870 			 * 3.3.6 Heartbeat Acknowledgement:
871 			 * ...
872 			 * A HEARTBEAT ACK is always sent to the source IP
873 			 * address of the IP datagram containing the
874 			 * HEARTBEAT chunk to which this ack is responding.
875 			 * ...
876 			 *
877 			 * ASCONF_ACKs also must be sent to the source.
878 			 */
879 			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
880 			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
881 			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
882 				new_transport = asoc->peer.active_path;
883 		}
884 
885 		/* Are we switching transports?
886 		 * Take care of transport locks.
887 		 */
888 		if (new_transport != transport) {
889 			transport = new_transport;
890 			if (list_empty(&transport->send_ready)) {
891 				list_add_tail(&transport->send_ready,
892 					      &transport_list);
893 			}
894 			packet = &transport->packet;
895 			sctp_packet_config(packet, vtag,
896 					   asoc->peer.ecn_capable);
897 		}
898 
899 		switch (chunk->chunk_hdr->type) {
900 		/*
901 		 * 6.10 Bundling
902 		 *   ...
903 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
904 		 *   COMPLETE with any other chunks.  [Send them immediately.]
905 		 */
906 		case SCTP_CID_INIT:
907 		case SCTP_CID_INIT_ACK:
908 		case SCTP_CID_SHUTDOWN_COMPLETE:
909 			sctp_packet_init(&singleton, transport, sport, dport);
910 			sctp_packet_config(&singleton, vtag, 0);
911 			sctp_packet_append_chunk(&singleton, chunk);
912 			error = sctp_packet_transmit(&singleton, gfp);
913 			if (error < 0) {
914 				asoc->base.sk->sk_err = -error;
915 				return;
916 			}
917 			break;
918 
919 		case SCTP_CID_ABORT:
920 			if (sctp_test_T_bit(chunk)) {
921 				packet->vtag = asoc->c.my_vtag;
922 			}
923 		/* The following chunks are "response" chunks, i.e.
924 		 * they are generated in response to something we
925 		 * received.  If we are sending these, then we can
926 		 * send only 1 packet containing these chunks.
927 		 */
928 		case SCTP_CID_HEARTBEAT_ACK:
929 		case SCTP_CID_SHUTDOWN_ACK:
930 		case SCTP_CID_COOKIE_ACK:
931 		case SCTP_CID_COOKIE_ECHO:
932 		case SCTP_CID_ERROR:
933 		case SCTP_CID_ECN_CWR:
934 		case SCTP_CID_ASCONF_ACK:
935 			one_packet = 1;
936 			/* Fall through */
937 
938 		case SCTP_CID_SACK:
939 		case SCTP_CID_HEARTBEAT:
940 		case SCTP_CID_SHUTDOWN:
941 		case SCTP_CID_ECN_ECNE:
942 		case SCTP_CID_ASCONF:
943 		case SCTP_CID_FWD_TSN:
944 		case SCTP_CID_RECONF:
945 			status = sctp_packet_transmit_chunk(packet, chunk,
946 							    one_packet, gfp);
947 			if (status  != SCTP_XMIT_OK) {
948 				/* put the chunk back */
949 				list_add(&chunk->list, &q->control_chunk_list);
950 				break;
951 			}
952 
953 			asoc->stats.octrlchunks++;
954 			/* PR-SCTP C5) If a FORWARD TSN is sent, the
955 			 * sender MUST assure that at least one T3-rtx
956 			 * timer is running.
957 			 */
958 			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
959 				sctp_transport_reset_t3_rtx(transport);
960 				transport->last_time_sent = jiffies;
961 			}
962 
963 			if (chunk == asoc->strreset_chunk)
964 				sctp_transport_reset_reconf_timer(transport);
965 
966 			break;
967 
968 		default:
969 			/* We built a chunk with an illegal type! */
970 			BUG();
971 		}
972 	}
973 
974 	if (q->asoc->src_out_of_asoc_ok)
975 		goto sctp_flush_out;
976 
977 	/* Is it OK to send data chunks?  */
978 	switch (asoc->state) {
979 	case SCTP_STATE_COOKIE_ECHOED:
980 		/* Only allow bundling when this packet has a COOKIE-ECHO
981 		 * chunk.
982 		 */
983 		if (!packet || !packet->has_cookie_echo)
984 			break;
985 
986 		/* fallthru */
987 	case SCTP_STATE_ESTABLISHED:
988 	case SCTP_STATE_SHUTDOWN_PENDING:
989 	case SCTP_STATE_SHUTDOWN_RECEIVED:
990 		/*
991 		 * RFC 2960 6.1  Transmission of DATA Chunks
992 		 *
993 		 * C) When the time comes for the sender to transmit,
994 		 * before sending new DATA chunks, the sender MUST
995 		 * first transmit any outstanding DATA chunks which
996 		 * are marked for retransmission (limited by the
997 		 * current cwnd).
998 		 */
999 		if (!list_empty(&q->retransmit)) {
1000 			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
1001 				goto sctp_flush_out;
1002 			if (transport == asoc->peer.retran_path)
1003 				goto retran;
1004 
1005 			/* Switch transports & prepare the packet.  */
1006 
1007 			transport = asoc->peer.retran_path;
1008 
1009 			if (list_empty(&transport->send_ready)) {
1010 				list_add_tail(&transport->send_ready,
1011 					      &transport_list);
1012 			}
1013 
1014 			packet = &transport->packet;
1015 			sctp_packet_config(packet, vtag,
1016 					   asoc->peer.ecn_capable);
1017 		retran:
1018 			error = sctp_outq_flush_rtx(q, packet,
1019 						    rtx_timeout, &start_timer);
1020 			if (error < 0)
1021 				asoc->base.sk->sk_err = -error;
1022 
1023 			if (start_timer) {
1024 				sctp_transport_reset_t3_rtx(transport);
1025 				transport->last_time_sent = jiffies;
1026 			}
1027 
1028 			/* This can happen on COOKIE-ECHO resend.  Only
1029 			 * one chunk can get bundled with a COOKIE-ECHO.
1030 			 */
1031 			if (packet->has_cookie_echo)
1032 				goto sctp_flush_out;
1033 
1034 			/* Don't send new data if there is still data
1035 			 * waiting to retransmit.
1036 			 */
1037 			if (!list_empty(&q->retransmit))
1038 				goto sctp_flush_out;
1039 		}
1040 
1041 		/* Apply Max.Burst limitation to the current transport in
1042 		 * case it will be used for new data.  We are going to
1043 		 * rest it before we return, but we want to apply the limit
1044 		 * to the currently queued data.
1045 		 */
1046 		if (transport)
1047 			sctp_transport_burst_limited(transport);
1048 
1049 		/* Finally, transmit new packets.  */
1050 		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
1051 			__u32 sid = ntohs(chunk->subh.data_hdr->stream);
1052 
1053 			/* Has this chunk expired? */
1054 			if (sctp_chunk_abandoned(chunk)) {
1055 				sctp_sched_dequeue_done(q, chunk);
1056 				sctp_chunk_fail(chunk, 0);
1057 				sctp_chunk_free(chunk);
1058 				continue;
1059 			}
1060 
1061 			if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
1062 				sctp_outq_head_data(q, chunk);
1063 				goto sctp_flush_out;
1064 			}
1065 
1066 			/* If there is a specified transport, use it.
1067 			 * Otherwise, we want to use the active path.
1068 			 */
1069 			new_transport = chunk->transport;
1070 			if (!new_transport ||
1071 			    ((new_transport->state == SCTP_INACTIVE) ||
1072 			     (new_transport->state == SCTP_UNCONFIRMED) ||
1073 			     (new_transport->state == SCTP_PF)))
1074 				new_transport = asoc->peer.active_path;
1075 			if (new_transport->state == SCTP_UNCONFIRMED) {
1076 				WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
1077 				sctp_sched_dequeue_done(q, chunk);
1078 				sctp_chunk_fail(chunk, 0);
1079 				sctp_chunk_free(chunk);
1080 				continue;
1081 			}
1082 
1083 			/* Change packets if necessary.  */
1084 			if (new_transport != transport) {
1085 				transport = new_transport;
1086 
1087 				/* Schedule to have this transport's
1088 				 * packet flushed.
1089 				 */
1090 				if (list_empty(&transport->send_ready)) {
1091 					list_add_tail(&transport->send_ready,
1092 						      &transport_list);
1093 				}
1094 
1095 				packet = &transport->packet;
1096 				sctp_packet_config(packet, vtag,
1097 						   asoc->peer.ecn_capable);
1098 				/* We've switched transports, so apply the
1099 				 * Burst limit to the new transport.
1100 				 */
1101 				sctp_transport_burst_limited(transport);
1102 			}
1103 
1104 			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
1105 				 "skb->users:%d\n",
1106 				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
1107 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
1108 				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
1109 				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
1110 				 refcount_read(&chunk->skb->users) : -1);
1111 
1112 			/* Add the chunk to the packet.  */
1113 			status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
1114 
1115 			switch (status) {
1116 			case SCTP_XMIT_PMTU_FULL:
1117 			case SCTP_XMIT_RWND_FULL:
1118 			case SCTP_XMIT_DELAY:
1119 				/* We could not append this chunk, so put
1120 				 * the chunk back on the output queue.
1121 				 */
1122 				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
1123 					 __func__, ntohl(chunk->subh.data_hdr->tsn),
1124 					 status);
1125 
1126 				sctp_outq_head_data(q, chunk);
1127 				goto sctp_flush_out;
1128 
1129 			case SCTP_XMIT_OK:
1130 				/* The sender is in the SHUTDOWN-PENDING state,
1131 				 * The sender MAY set the I-bit in the DATA
1132 				 * chunk header.
1133 				 */
1134 				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1135 					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1136 				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1137 					asoc->stats.ouodchunks++;
1138 				else
1139 					asoc->stats.oodchunks++;
1140 
1141 				/* Only now it's safe to consider this
1142 				 * chunk as sent, sched-wise.
1143 				 */
1144 				sctp_sched_dequeue_done(q, chunk);
1145 
1146 				break;
1147 
1148 			default:
1149 				BUG();
1150 			}
1151 
1152 			/* BUG: We assume that the sctp_packet_transmit()
1153 			 * call below will succeed all the time and add the
1154 			 * chunk to the transmitted list and restart the
1155 			 * timers.
1156 			 * It is possible that the call can fail under OOM
1157 			 * conditions.
1158 			 *
1159 			 * Is this really a problem?  Won't this behave
1160 			 * like a lost TSN?
1161 			 */
1162 			list_add_tail(&chunk->transmitted_list,
1163 				      &transport->transmitted);
1164 
1165 			sctp_transport_reset_t3_rtx(transport);
1166 			transport->last_time_sent = jiffies;
1167 
1168 			/* Only let one DATA chunk get bundled with a
1169 			 * COOKIE-ECHO chunk.
1170 			 */
1171 			if (packet->has_cookie_echo)
1172 				goto sctp_flush_out;
1173 		}
1174 		break;
1175 
1176 	default:
1177 		/* Do nothing.  */
1178 		break;
1179 	}
1180 
1181 sctp_flush_out:
1182 
1183 	/* Before returning, examine all the transports touched in
1184 	 * this call.  Right now, we bluntly force clear all the
1185 	 * transports.  Things might change after we implement Nagle.
1186 	 * But such an examination is still required.
1187 	 *
1188 	 * --xguo
1189 	 */
1190 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
1191 		struct sctp_transport *t = list_entry(ltransport,
1192 						      struct sctp_transport,
1193 						      send_ready);
1194 		packet = &t->packet;
1195 		if (!sctp_packet_empty(packet)) {
1196 			error = sctp_packet_transmit(packet, gfp);
1197 			if (error < 0)
1198 				asoc->base.sk->sk_err = -error;
1199 		}
1200 
1201 		/* Clear the burst limited state, if any */
1202 		sctp_transport_burst_reset(t);
1203 	}
1204 }
1205 
1206 /* Update unack_data based on the incoming SACK chunk */
1207 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1208 					struct sctp_sackhdr *sack)
1209 {
1210 	union sctp_sack_variable *frags;
1211 	__u16 unack_data;
1212 	int i;
1213 
1214 	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1215 
1216 	frags = sack->variable;
1217 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1218 		unack_data -= ((ntohs(frags[i].gab.end) -
1219 				ntohs(frags[i].gab.start) + 1));
1220 	}
1221 
1222 	assoc->unack_data = unack_data;
1223 }
1224 
1225 /* This is where we REALLY process a SACK.
1226  *
1227  * Process the SACK against the outqueue.  Mostly, this just frees
1228  * things off the transmitted queue.
1229  */
1230 int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1231 {
1232 	struct sctp_association *asoc = q->asoc;
1233 	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
1234 	struct sctp_transport *transport;
1235 	struct sctp_chunk *tchunk = NULL;
1236 	struct list_head *lchunk, *transport_list, *temp;
1237 	union sctp_sack_variable *frags = sack->variable;
1238 	__u32 sack_ctsn, ctsn, tsn;
1239 	__u32 highest_tsn, highest_new_tsn;
1240 	__u32 sack_a_rwnd;
1241 	unsigned int outstanding;
1242 	struct sctp_transport *primary = asoc->peer.primary_path;
1243 	int count_of_newacks = 0;
1244 	int gap_ack_blocks;
1245 	u8 accum_moved = 0;
1246 
1247 	/* Grab the association's destination address list. */
1248 	transport_list = &asoc->peer.transport_addr_list;
1249 
1250 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1251 	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1252 	asoc->stats.gapcnt += gap_ack_blocks;
1253 	/*
1254 	 * SFR-CACC algorithm:
1255 	 * On receipt of a SACK the sender SHOULD execute the
1256 	 * following statements.
1257 	 *
1258 	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1259 	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1260 	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1261 	 * all destinations.
1262 	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1263 	 * is set the receiver of the SACK MUST take the following actions:
1264 	 *
1265 	 * A) Initialize the cacc_saw_newack to 0 for all destination
1266 	 * addresses.
1267 	 *
1268 	 * Only bother if changeover_active is set. Otherwise, this is
1269 	 * totally suboptimal to do on every SACK.
1270 	 */
1271 	if (primary->cacc.changeover_active) {
1272 		u8 clear_cycling = 0;
1273 
1274 		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1275 			primary->cacc.changeover_active = 0;
1276 			clear_cycling = 1;
1277 		}
1278 
1279 		if (clear_cycling || gap_ack_blocks) {
1280 			list_for_each_entry(transport, transport_list,
1281 					transports) {
1282 				if (clear_cycling)
1283 					transport->cacc.cycling_changeover = 0;
1284 				if (gap_ack_blocks)
1285 					transport->cacc.cacc_saw_newack = 0;
1286 			}
1287 		}
1288 	}
1289 
1290 	/* Get the highest TSN in the sack. */
1291 	highest_tsn = sack_ctsn;
1292 	if (gap_ack_blocks)
1293 		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1294 
1295 	if (TSN_lt(asoc->highest_sacked, highest_tsn))
1296 		asoc->highest_sacked = highest_tsn;
1297 
1298 	highest_new_tsn = sack_ctsn;
1299 
1300 	/* Run through the retransmit queue.  Credit bytes received
1301 	 * and free those chunks that we can.
1302 	 */
1303 	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
1304 
1305 	/* Run through the transmitted queue.
1306 	 * Credit bytes received and free those chunks which we can.
1307 	 *
1308 	 * This is a MASSIVE candidate for optimization.
1309 	 */
1310 	list_for_each_entry(transport, transport_list, transports) {
1311 		sctp_check_transmitted(q, &transport->transmitted,
1312 				       transport, &chunk->source, sack,
1313 				       &highest_new_tsn);
1314 		/*
1315 		 * SFR-CACC algorithm:
1316 		 * C) Let count_of_newacks be the number of
1317 		 * destinations for which cacc_saw_newack is set.
1318 		 */
1319 		if (transport->cacc.cacc_saw_newack)
1320 			count_of_newacks++;
1321 	}
1322 
1323 	/* Move the Cumulative TSN Ack Point if appropriate.  */
1324 	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1325 		asoc->ctsn_ack_point = sack_ctsn;
1326 		accum_moved = 1;
1327 	}
1328 
1329 	if (gap_ack_blocks) {
1330 
1331 		if (asoc->fast_recovery && accum_moved)
1332 			highest_new_tsn = highest_tsn;
1333 
1334 		list_for_each_entry(transport, transport_list, transports)
1335 			sctp_mark_missing(q, &transport->transmitted, transport,
1336 					  highest_new_tsn, count_of_newacks);
1337 	}
1338 
1339 	/* Update unack_data field in the assoc. */
1340 	sctp_sack_update_unack_data(asoc, sack);
1341 
1342 	ctsn = asoc->ctsn_ack_point;
1343 
1344 	/* Throw away stuff rotting on the sack queue.  */
1345 	list_for_each_safe(lchunk, temp, &q->sacked) {
1346 		tchunk = list_entry(lchunk, struct sctp_chunk,
1347 				    transmitted_list);
1348 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1349 		if (TSN_lte(tsn, ctsn)) {
1350 			list_del_init(&tchunk->transmitted_list);
1351 			if (asoc->peer.prsctp_capable &&
1352 			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
1353 				asoc->sent_cnt_removable--;
1354 			sctp_chunk_free(tchunk);
1355 		}
1356 	}
1357 
1358 	/* ii) Set rwnd equal to the newly received a_rwnd minus the
1359 	 *     number of bytes still outstanding after processing the
1360 	 *     Cumulative TSN Ack and the Gap Ack Blocks.
1361 	 */
1362 
1363 	sack_a_rwnd = ntohl(sack->a_rwnd);
1364 	asoc->peer.zero_window_announced = !sack_a_rwnd;
1365 	outstanding = q->outstanding_bytes;
1366 
1367 	if (outstanding < sack_a_rwnd)
1368 		sack_a_rwnd -= outstanding;
1369 	else
1370 		sack_a_rwnd = 0;
1371 
1372 	asoc->peer.rwnd = sack_a_rwnd;
1373 
1374 	sctp_generate_fwdtsn(q, sack_ctsn);
1375 
1376 	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
1377 	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
1378 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
1379 		 asoc->adv_peer_ack_point);
1380 
1381 	return sctp_outq_is_empty(q);
1382 }
1383 
1384 /* Is the outqueue empty?
1385  * The queue is empty when we have not pending data, no in-flight data
1386  * and nothing pending retransmissions.
1387  */
1388 int sctp_outq_is_empty(const struct sctp_outq *q)
1389 {
1390 	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1391 	       list_empty(&q->retransmit);
1392 }
1393 
1394 /********************************************************************
1395  * 2nd Level Abstractions
1396  ********************************************************************/
1397 
1398 /* Go through a transport's transmitted list or the association's retransmit
1399  * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1400  * The retransmit list will not have an associated transport.
1401  *
1402  * I added coherent debug information output.	--xguo
1403  *
1404  * Instead of printing 'sacked' or 'kept' for each TSN on the
1405  * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1406  * KEPT TSN6-TSN7, etc.
1407  */
1408 static void sctp_check_transmitted(struct sctp_outq *q,
1409 				   struct list_head *transmitted_queue,
1410 				   struct sctp_transport *transport,
1411 				   union sctp_addr *saddr,
1412 				   struct sctp_sackhdr *sack,
1413 				   __u32 *highest_new_tsn_in_sack)
1414 {
1415 	struct list_head *lchunk;
1416 	struct sctp_chunk *tchunk;
1417 	struct list_head tlist;
1418 	__u32 tsn;
1419 	__u32 sack_ctsn;
1420 	__u32 rtt;
1421 	__u8 restart_timer = 0;
1422 	int bytes_acked = 0;
1423 	int migrate_bytes = 0;
1424 	bool forward_progress = false;
1425 
1426 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1427 
1428 	INIT_LIST_HEAD(&tlist);
1429 
1430 	/* The while loop will skip empty transmitted queues. */
1431 	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1432 		tchunk = list_entry(lchunk, struct sctp_chunk,
1433 				    transmitted_list);
1434 
1435 		if (sctp_chunk_abandoned(tchunk)) {
1436 			/* Move the chunk to abandoned list. */
1437 			sctp_insert_list(&q->abandoned, lchunk);
1438 
1439 			/* If this chunk has not been acked, stop
1440 			 * considering it as 'outstanding'.
1441 			 */
1442 			if (transmitted_queue != &q->retransmit &&
1443 			    !tchunk->tsn_gap_acked) {
1444 				if (tchunk->transport)
1445 					tchunk->transport->flight_size -=
1446 							sctp_data_size(tchunk);
1447 				q->outstanding_bytes -= sctp_data_size(tchunk);
1448 			}
1449 			continue;
1450 		}
1451 
1452 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1453 		if (sctp_acked(sack, tsn)) {
1454 			/* If this queue is the retransmit queue, the
1455 			 * retransmit timer has already reclaimed
1456 			 * the outstanding bytes for this chunk, so only
1457 			 * count bytes associated with a transport.
1458 			 */
1459 			if (transport) {
1460 				/* If this chunk is being used for RTT
1461 				 * measurement, calculate the RTT and update
1462 				 * the RTO using this value.
1463 				 *
1464 				 * 6.3.1 C5) Karn's algorithm: RTT measurements
1465 				 * MUST NOT be made using packets that were
1466 				 * retransmitted (and thus for which it is
1467 				 * ambiguous whether the reply was for the
1468 				 * first instance of the packet or a later
1469 				 * instance).
1470 				 */
1471 				if (!tchunk->tsn_gap_acked &&
1472 				    !sctp_chunk_retransmitted(tchunk) &&
1473 				    tchunk->rtt_in_progress) {
1474 					tchunk->rtt_in_progress = 0;
1475 					rtt = jiffies - tchunk->sent_at;
1476 					sctp_transport_update_rto(transport,
1477 								  rtt);
1478 				}
1479 			}
1480 
1481 			/* If the chunk hasn't been marked as ACKED,
1482 			 * mark it and account bytes_acked if the
1483 			 * chunk had a valid transport (it will not
1484 			 * have a transport if ASCONF had deleted it
1485 			 * while DATA was outstanding).
1486 			 */
1487 			if (!tchunk->tsn_gap_acked) {
1488 				tchunk->tsn_gap_acked = 1;
1489 				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1490 					*highest_new_tsn_in_sack = tsn;
1491 				bytes_acked += sctp_data_size(tchunk);
1492 				if (!tchunk->transport)
1493 					migrate_bytes += sctp_data_size(tchunk);
1494 				forward_progress = true;
1495 			}
1496 
1497 			if (TSN_lte(tsn, sack_ctsn)) {
1498 				/* RFC 2960  6.3.2 Retransmission Timer Rules
1499 				 *
1500 				 * R3) Whenever a SACK is received
1501 				 * that acknowledges the DATA chunk
1502 				 * with the earliest outstanding TSN
1503 				 * for that address, restart T3-rtx
1504 				 * timer for that address with its
1505 				 * current RTO.
1506 				 */
1507 				restart_timer = 1;
1508 				forward_progress = true;
1509 
1510 				if (!tchunk->tsn_gap_acked) {
1511 					/*
1512 					 * SFR-CACC algorithm:
1513 					 * 2) If the SACK contains gap acks
1514 					 * and the flag CHANGEOVER_ACTIVE is
1515 					 * set the receiver of the SACK MUST
1516 					 * take the following action:
1517 					 *
1518 					 * B) For each TSN t being acked that
1519 					 * has not been acked in any SACK so
1520 					 * far, set cacc_saw_newack to 1 for
1521 					 * the destination that the TSN was
1522 					 * sent to.
1523 					 */
1524 					if (transport &&
1525 					    sack->num_gap_ack_blocks &&
1526 					    q->asoc->peer.primary_path->cacc.
1527 					    changeover_active)
1528 						transport->cacc.cacc_saw_newack
1529 							= 1;
1530 				}
1531 
1532 				list_add_tail(&tchunk->transmitted_list,
1533 					      &q->sacked);
1534 			} else {
1535 				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1536 				 * M2) Each time a SACK arrives reporting
1537 				 * 'Stray DATA chunk(s)' record the highest TSN
1538 				 * reported as newly acknowledged, call this
1539 				 * value 'HighestTSNinSack'. A newly
1540 				 * acknowledged DATA chunk is one not
1541 				 * previously acknowledged in a SACK.
1542 				 *
1543 				 * When the SCTP sender of data receives a SACK
1544 				 * chunk that acknowledges, for the first time,
1545 				 * the receipt of a DATA chunk, all the still
1546 				 * unacknowledged DATA chunks whose TSN is
1547 				 * older than that newly acknowledged DATA
1548 				 * chunk, are qualified as 'Stray DATA chunks'.
1549 				 */
1550 				list_add_tail(lchunk, &tlist);
1551 			}
1552 		} else {
1553 			if (tchunk->tsn_gap_acked) {
1554 				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1555 					 __func__, tsn);
1556 
1557 				tchunk->tsn_gap_acked = 0;
1558 
1559 				if (tchunk->transport)
1560 					bytes_acked -= sctp_data_size(tchunk);
1561 
1562 				/* RFC 2960 6.3.2 Retransmission Timer Rules
1563 				 *
1564 				 * R4) Whenever a SACK is received missing a
1565 				 * TSN that was previously acknowledged via a
1566 				 * Gap Ack Block, start T3-rtx for the
1567 				 * destination address to which the DATA
1568 				 * chunk was originally
1569 				 * transmitted if it is not already running.
1570 				 */
1571 				restart_timer = 1;
1572 			}
1573 
1574 			list_add_tail(lchunk, &tlist);
1575 		}
1576 	}
1577 
1578 	if (transport) {
1579 		if (bytes_acked) {
1580 			struct sctp_association *asoc = transport->asoc;
1581 
1582 			/* We may have counted DATA that was migrated
1583 			 * to this transport due to DEL-IP operation.
1584 			 * Subtract those bytes, since the were never
1585 			 * send on this transport and shouldn't be
1586 			 * credited to this transport.
1587 			 */
1588 			bytes_acked -= migrate_bytes;
1589 
1590 			/* 8.2. When an outstanding TSN is acknowledged,
1591 			 * the endpoint shall clear the error counter of
1592 			 * the destination transport address to which the
1593 			 * DATA chunk was last sent.
1594 			 * The association's overall error counter is
1595 			 * also cleared.
1596 			 */
1597 			transport->error_count = 0;
1598 			transport->asoc->overall_error_count = 0;
1599 			forward_progress = true;
1600 
1601 			/*
1602 			 * While in SHUTDOWN PENDING, we may have started
1603 			 * the T5 shutdown guard timer after reaching the
1604 			 * retransmission limit. Stop that timer as soon
1605 			 * as the receiver acknowledged any data.
1606 			 */
1607 			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1608 			    del_timer(&asoc->timers
1609 				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1610 					sctp_association_put(asoc);
1611 
1612 			/* Mark the destination transport address as
1613 			 * active if it is not so marked.
1614 			 */
1615 			if ((transport->state == SCTP_INACTIVE ||
1616 			     transport->state == SCTP_UNCONFIRMED) &&
1617 			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1618 				sctp_assoc_control_transport(
1619 					transport->asoc,
1620 					transport,
1621 					SCTP_TRANSPORT_UP,
1622 					SCTP_RECEIVED_SACK);
1623 			}
1624 
1625 			sctp_transport_raise_cwnd(transport, sack_ctsn,
1626 						  bytes_acked);
1627 
1628 			transport->flight_size -= bytes_acked;
1629 			if (transport->flight_size == 0)
1630 				transport->partial_bytes_acked = 0;
1631 			q->outstanding_bytes -= bytes_acked + migrate_bytes;
1632 		} else {
1633 			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
1634 			 * When a sender is doing zero window probing, it
1635 			 * should not timeout the association if it continues
1636 			 * to receive new packets from the receiver. The
1637 			 * reason is that the receiver MAY keep its window
1638 			 * closed for an indefinite time.
1639 			 * A sender is doing zero window probing when the
1640 			 * receiver's advertised window is zero, and there is
1641 			 * only one data chunk in flight to the receiver.
1642 			 *
1643 			 * Allow the association to timeout while in SHUTDOWN
1644 			 * PENDING or SHUTDOWN RECEIVED in case the receiver
1645 			 * stays in zero window mode forever.
1646 			 */
1647 			if (!q->asoc->peer.rwnd &&
1648 			    !list_empty(&tlist) &&
1649 			    (sack_ctsn+2 == q->asoc->next_tsn) &&
1650 			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1651 				pr_debug("%s: sack received for zero window "
1652 					 "probe:%u\n", __func__, sack_ctsn);
1653 
1654 				q->asoc->overall_error_count = 0;
1655 				transport->error_count = 0;
1656 			}
1657 		}
1658 
1659 		/* RFC 2960 6.3.2 Retransmission Timer Rules
1660 		 *
1661 		 * R2) Whenever all outstanding data sent to an address have
1662 		 * been acknowledged, turn off the T3-rtx timer of that
1663 		 * address.
1664 		 */
1665 		if (!transport->flight_size) {
1666 			if (del_timer(&transport->T3_rtx_timer))
1667 				sctp_transport_put(transport);
1668 		} else if (restart_timer) {
1669 			if (!mod_timer(&transport->T3_rtx_timer,
1670 				       jiffies + transport->rto))
1671 				sctp_transport_hold(transport);
1672 		}
1673 
1674 		if (forward_progress) {
1675 			if (transport->dst)
1676 				sctp_transport_dst_confirm(transport);
1677 		}
1678 	}
1679 
1680 	list_splice(&tlist, transmitted_queue);
1681 }
1682 
1683 /* Mark chunks as missing and consequently may get retransmitted. */
1684 static void sctp_mark_missing(struct sctp_outq *q,
1685 			      struct list_head *transmitted_queue,
1686 			      struct sctp_transport *transport,
1687 			      __u32 highest_new_tsn_in_sack,
1688 			      int count_of_newacks)
1689 {
1690 	struct sctp_chunk *chunk;
1691 	__u32 tsn;
1692 	char do_fast_retransmit = 0;
1693 	struct sctp_association *asoc = q->asoc;
1694 	struct sctp_transport *primary = asoc->peer.primary_path;
1695 
1696 	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1697 
1698 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1699 
1700 		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1701 		 * 'Unacknowledged TSN's', if the TSN number of an
1702 		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1703 		 * value, increment the 'TSN.Missing.Report' count on that
1704 		 * chunk if it has NOT been fast retransmitted or marked for
1705 		 * fast retransmit already.
1706 		 */
1707 		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1708 		    !chunk->tsn_gap_acked &&
1709 		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1710 
1711 			/* SFR-CACC may require us to skip marking
1712 			 * this chunk as missing.
1713 			 */
1714 			if (!transport || !sctp_cacc_skip(primary,
1715 						chunk->transport,
1716 						count_of_newacks, tsn)) {
1717 				chunk->tsn_missing_report++;
1718 
1719 				pr_debug("%s: tsn:0x%x missing counter:%d\n",
1720 					 __func__, tsn, chunk->tsn_missing_report);
1721 			}
1722 		}
1723 		/*
1724 		 * M4) If any DATA chunk is found to have a
1725 		 * 'TSN.Missing.Report'
1726 		 * value larger than or equal to 3, mark that chunk for
1727 		 * retransmission and start the fast retransmit procedure.
1728 		 */
1729 
1730 		if (chunk->tsn_missing_report >= 3) {
1731 			chunk->fast_retransmit = SCTP_NEED_FRTX;
1732 			do_fast_retransmit = 1;
1733 		}
1734 	}
1735 
1736 	if (transport) {
1737 		if (do_fast_retransmit)
1738 			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1739 
1740 		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1741 			 "flight_size:%d, pba:%d\n",  __func__, transport,
1742 			 transport->cwnd, transport->ssthresh,
1743 			 transport->flight_size, transport->partial_bytes_acked);
1744 	}
1745 }
1746 
1747 /* Is the given TSN acked by this packet?  */
1748 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1749 {
1750 	__u32 ctsn = ntohl(sack->cum_tsn_ack);
1751 	union sctp_sack_variable *frags;
1752 	__u16 tsn_offset, blocks;
1753 	int i;
1754 
1755 	if (TSN_lte(tsn, ctsn))
1756 		goto pass;
1757 
1758 	/* 3.3.4 Selective Acknowledgement (SACK) (3):
1759 	 *
1760 	 * Gap Ack Blocks:
1761 	 *  These fields contain the Gap Ack Blocks. They are repeated
1762 	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
1763 	 *  defined in the Number of Gap Ack Blocks field. All DATA
1764 	 *  chunks with TSNs greater than or equal to (Cumulative TSN
1765 	 *  Ack + Gap Ack Block Start) and less than or equal to
1766 	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1767 	 *  Block are assumed to have been received correctly.
1768 	 */
1769 
1770 	frags = sack->variable;
1771 	blocks = ntohs(sack->num_gap_ack_blocks);
1772 	tsn_offset = tsn - ctsn;
1773 	for (i = 0; i < blocks; ++i) {
1774 		if (tsn_offset >= ntohs(frags[i].gab.start) &&
1775 		    tsn_offset <= ntohs(frags[i].gab.end))
1776 			goto pass;
1777 	}
1778 
1779 	return 0;
1780 pass:
1781 	return 1;
1782 }
1783 
1784 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1785 				    int nskips, __be16 stream)
1786 {
1787 	int i;
1788 
1789 	for (i = 0; i < nskips; i++) {
1790 		if (skiplist[i].stream == stream)
1791 			return i;
1792 	}
1793 	return i;
1794 }
1795 
1796 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1797 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1798 {
1799 	struct sctp_association *asoc = q->asoc;
1800 	struct sctp_chunk *ftsn_chunk = NULL;
1801 	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1802 	int nskips = 0;
1803 	int skip_pos = 0;
1804 	__u32 tsn;
1805 	struct sctp_chunk *chunk;
1806 	struct list_head *lchunk, *temp;
1807 
1808 	if (!asoc->peer.prsctp_capable)
1809 		return;
1810 
1811 	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1812 	 * received SACK.
1813 	 *
1814 	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1815 	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1816 	 */
1817 	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1818 		asoc->adv_peer_ack_point = ctsn;
1819 
1820 	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1821 	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1822 	 * the chunk next in the out-queue space is marked as "abandoned" as
1823 	 * shown in the following example:
1824 	 *
1825 	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1826 	 * and the Advanced.Peer.Ack.Point is updated to this value:
1827 	 *
1828 	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1829 	 *   normal SACK processing           local advancement
1830 	 *                ...                           ...
1831 	 *   Adv.Ack.Pt-> 102 acked                     102 acked
1832 	 *                103 abandoned                 103 abandoned
1833 	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
1834 	 *                105                           105
1835 	 *                106 acked                     106 acked
1836 	 *                ...                           ...
1837 	 *
1838 	 * In this example, the data sender successfully advanced the
1839 	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1840 	 */
1841 	list_for_each_safe(lchunk, temp, &q->abandoned) {
1842 		chunk = list_entry(lchunk, struct sctp_chunk,
1843 					transmitted_list);
1844 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1845 
1846 		/* Remove any chunks in the abandoned queue that are acked by
1847 		 * the ctsn.
1848 		 */
1849 		if (TSN_lte(tsn, ctsn)) {
1850 			list_del_init(lchunk);
1851 			sctp_chunk_free(chunk);
1852 		} else {
1853 			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
1854 				asoc->adv_peer_ack_point = tsn;
1855 				if (chunk->chunk_hdr->flags &
1856 					 SCTP_DATA_UNORDERED)
1857 					continue;
1858 				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1859 						nskips,
1860 						chunk->subh.data_hdr->stream);
1861 				ftsn_skip_arr[skip_pos].stream =
1862 					chunk->subh.data_hdr->stream;
1863 				ftsn_skip_arr[skip_pos].ssn =
1864 					 chunk->subh.data_hdr->ssn;
1865 				if (skip_pos == nskips)
1866 					nskips++;
1867 				if (nskips == 10)
1868 					break;
1869 			} else
1870 				break;
1871 		}
1872 	}
1873 
1874 	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1875 	 * is greater than the Cumulative TSN ACK carried in the received
1876 	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1877 	 * chunk containing the latest value of the
1878 	 * "Advanced.Peer.Ack.Point".
1879 	 *
1880 	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1881 	 * list each stream and sequence number in the forwarded TSN. This
1882 	 * information will enable the receiver to easily find any
1883 	 * stranded TSN's waiting on stream reorder queues. Each stream
1884 	 * SHOULD only be reported once; this means that if multiple
1885 	 * abandoned messages occur in the same stream then only the
1886 	 * highest abandoned stream sequence number is reported. If the
1887 	 * total size of the FORWARD TSN does NOT fit in a single MTU then
1888 	 * the sender of the FORWARD TSN SHOULD lower the
1889 	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1890 	 * single MTU.
1891 	 */
1892 	if (asoc->adv_peer_ack_point > ctsn)
1893 		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1894 					      nskips, &ftsn_skip_arr[0]);
1895 
1896 	if (ftsn_chunk) {
1897 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1898 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1899 	}
1900 }
1901