xref: /openbmc/linux/net/sctp/outqueue.c (revision bc5aa3a0)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001-2003 Intel Corp.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions implement the sctp_outq class.   The outqueue handles
10  * bundling and queueing of outgoing SCTP chunks.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Perry Melange         <pmelange@null.cc.uic.edu>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang             <hui.huang@nokia.com>
38  *    Sridhar Samudrala     <sri@us.ibm.com>
39  *    Jon Grimm             <jgrimm@us.ibm.com>
40  */
41 
42 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43 
44 #include <linux/types.h>
45 #include <linux/list.h>   /* For struct list_head */
46 #include <linux/socket.h>
47 #include <linux/ip.h>
48 #include <linux/slab.h>
49 #include <net/sock.h>	  /* For skb_set_owner_w */
50 
51 #include <net/sctp/sctp.h>
52 #include <net/sctp/sm.h>
53 
54 /* Declare internal functions here.  */
55 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
56 static void sctp_check_transmitted(struct sctp_outq *q,
57 				   struct list_head *transmitted_queue,
58 				   struct sctp_transport *transport,
59 				   union sctp_addr *saddr,
60 				   struct sctp_sackhdr *sack,
61 				   __u32 *highest_new_tsn);
62 
63 static void sctp_mark_missing(struct sctp_outq *q,
64 			      struct list_head *transmitted_queue,
65 			      struct sctp_transport *transport,
66 			      __u32 highest_new_tsn,
67 			      int count_of_newacks);
68 
69 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
70 
71 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
72 
73 /* Add data to the front of the queue. */
74 static inline void sctp_outq_head_data(struct sctp_outq *q,
75 					struct sctp_chunk *ch)
76 {
77 	list_add(&ch->list, &q->out_chunk_list);
78 	q->out_qlen += ch->skb->len;
79 }
80 
81 /* Take data from the front of the queue. */
82 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
83 {
84 	struct sctp_chunk *ch = NULL;
85 
86 	if (!list_empty(&q->out_chunk_list)) {
87 		struct list_head *entry = q->out_chunk_list.next;
88 
89 		ch = list_entry(entry, struct sctp_chunk, list);
90 		list_del_init(entry);
91 		q->out_qlen -= ch->skb->len;
92 	}
93 	return ch;
94 }
95 /* Add data chunk to the end of the queue. */
96 static inline void sctp_outq_tail_data(struct sctp_outq *q,
97 				       struct sctp_chunk *ch)
98 {
99 	list_add_tail(&ch->list, &q->out_chunk_list);
100 	q->out_qlen += ch->skb->len;
101 }
102 
103 /*
104  * SFR-CACC algorithm:
105  * D) If count_of_newacks is greater than or equal to 2
106  * and t was not sent to the current primary then the
107  * sender MUST NOT increment missing report count for t.
108  */
109 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
110 				       struct sctp_transport *transport,
111 				       int count_of_newacks)
112 {
113 	if (count_of_newacks >= 2 && transport != primary)
114 		return 1;
115 	return 0;
116 }
117 
118 /*
119  * SFR-CACC algorithm:
120  * F) If count_of_newacks is less than 2, let d be the
121  * destination to which t was sent. If cacc_saw_newack
122  * is 0 for destination d, then the sender MUST NOT
123  * increment missing report count for t.
124  */
125 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
126 				       int count_of_newacks)
127 {
128 	if (count_of_newacks < 2 &&
129 			(transport && !transport->cacc.cacc_saw_newack))
130 		return 1;
131 	return 0;
132 }
133 
134 /*
135  * SFR-CACC algorithm:
136  * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
137  * execute steps C, D, F.
138  *
139  * C has been implemented in sctp_outq_sack
140  */
141 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
142 				     struct sctp_transport *transport,
143 				     int count_of_newacks)
144 {
145 	if (!primary->cacc.cycling_changeover) {
146 		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
147 			return 1;
148 		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
149 			return 1;
150 		return 0;
151 	}
152 	return 0;
153 }
154 
155 /*
156  * SFR-CACC algorithm:
157  * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
158  * than next_tsn_at_change of the current primary, then
159  * the sender MUST NOT increment missing report count
160  * for t.
161  */
162 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
163 {
164 	if (primary->cacc.cycling_changeover &&
165 	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
166 		return 1;
167 	return 0;
168 }
169 
170 /*
171  * SFR-CACC algorithm:
172  * 3) If the missing report count for TSN t is to be
173  * incremented according to [RFC2960] and
174  * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
175  * then the sender MUST further execute steps 3.1 and
176  * 3.2 to determine if the missing report count for
177  * TSN t SHOULD NOT be incremented.
178  *
179  * 3.3) If 3.1 and 3.2 do not dictate that the missing
180  * report count for t should not be incremented, then
181  * the sender SHOULD increment missing report count for
182  * t (according to [RFC2960] and [SCTP_STEWART_2002]).
183  */
184 static inline int sctp_cacc_skip(struct sctp_transport *primary,
185 				 struct sctp_transport *transport,
186 				 int count_of_newacks,
187 				 __u32 tsn)
188 {
189 	if (primary->cacc.changeover_active &&
190 	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
191 	     sctp_cacc_skip_3_2(primary, tsn)))
192 		return 1;
193 	return 0;
194 }
195 
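/* Illustrative walk-through of the skip decision (example values, not
 * RFC text): with a changeover active and a SACK that yielded
 * count_of_newacks == 3, any TSN sent to a non-primary transport hits
 * rule D and is skipped.  With count_of_newacks == 1, only rule F can
 * apply, and only if the transport that carried the TSN has
 * cacc_saw_newack == 0.  The guard is used in sctp_mark_missing() below:
 *
 *	if (!transport || !sctp_cacc_skip(primary, chunk->transport,
 *					  count_of_newacks, tsn))
 *		chunk->tsn_missing_report++;
 */
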
196 /* Initialize an existing sctp_outq.  This does the boring stuff.
197  * You still need to define handlers if you really want to DO
198  * something with this structure...
199  */
200 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
201 {
202 	memset(q, 0, sizeof(struct sctp_outq));
203 
204 	q->asoc = asoc;
205 	INIT_LIST_HEAD(&q->out_chunk_list);
206 	INIT_LIST_HEAD(&q->control_chunk_list);
207 	INIT_LIST_HEAD(&q->retransmit);
208 	INIT_LIST_HEAD(&q->sacked);
209 	INIT_LIST_HEAD(&q->abandoned);
210 }
211 
212 /* Free the chunks pending on an outqueue's various lists.  The
213  * outqueue structure itself is not freed here. */
214 static void __sctp_outq_teardown(struct sctp_outq *q)
215 {
216 	struct sctp_transport *transport;
217 	struct list_head *lchunk, *temp;
218 	struct sctp_chunk *chunk, *tmp;
219 
220 	/* Throw away unacknowledged chunks. */
221 	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
222 			transports) {
223 		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
224 			chunk = list_entry(lchunk, struct sctp_chunk,
225 					   transmitted_list);
226 			/* Mark as part of a failed message. */
227 			sctp_chunk_fail(chunk, q->error);
228 			sctp_chunk_free(chunk);
229 		}
230 	}
231 
232 	/* Throw away chunks that have been gap ACKed.  */
233 	list_for_each_safe(lchunk, temp, &q->sacked) {
234 		list_del_init(lchunk);
235 		chunk = list_entry(lchunk, struct sctp_chunk,
236 				   transmitted_list);
237 		sctp_chunk_fail(chunk, q->error);
238 		sctp_chunk_free(chunk);
239 	}
240 
241 	/* Throw away any chunks in the retransmit queue. */
242 	list_for_each_safe(lchunk, temp, &q->retransmit) {
243 		list_del_init(lchunk);
244 		chunk = list_entry(lchunk, struct sctp_chunk,
245 				   transmitted_list);
246 		sctp_chunk_fail(chunk, q->error);
247 		sctp_chunk_free(chunk);
248 	}
249 
250 	/* Throw away any chunks that are in the abandoned queue. */
251 	list_for_each_safe(lchunk, temp, &q->abandoned) {
252 		list_del_init(lchunk);
253 		chunk = list_entry(lchunk, struct sctp_chunk,
254 				   transmitted_list);
255 		sctp_chunk_fail(chunk, q->error);
256 		sctp_chunk_free(chunk);
257 	}
258 
259 	/* Throw away any leftover data chunks. */
260 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
261 
262 		/* Mark as send failure. */
263 		sctp_chunk_fail(chunk, q->error);
264 		sctp_chunk_free(chunk);
265 	}
266 
267 	/* Throw away any leftover control chunks. */
268 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
269 		list_del_init(&chunk->list);
270 		sctp_chunk_free(chunk);
271 	}
272 }
273 
274 void sctp_outq_teardown(struct sctp_outq *q)
275 {
276 	__sctp_outq_teardown(q);
277 	sctp_outq_init(q->asoc, q);
278 }
279 
280 /* Free the outqueue structure and any related pending chunks.  */
281 void sctp_outq_free(struct sctp_outq *q)
282 {
283 	/* Throw away leftover chunks. */
284 	__sctp_outq_teardown(q);
285 }
286 
287 /* Put a new chunk in an sctp_outq.  */
288 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
289 {
290 	struct net *net = sock_net(q->asoc->base.sk);
291 	int error = 0;
292 
293 	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
294 		 chunk && chunk->chunk_hdr ?
295 		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
296 		 "illegal chunk");
297 
298 	/* If it is data, queue it up, otherwise, send it
299 	 * immediately.
300 	 */
301 	if (sctp_chunk_is_data(chunk)) {
302 		/* Is it OK to queue data chunks?  */
303 		/* From 9. Termination of Association
304 		 *
305 		 * When either endpoint performs a shutdown, the
306 		 * association on each peer will stop accepting new
307 		 * data from its user and only deliver data in queue
308 		 * at the time of sending or receiving the SHUTDOWN
309 		 * chunk.
310 		 */
311 		switch (q->asoc->state) {
312 		case SCTP_STATE_CLOSED:
313 		case SCTP_STATE_SHUTDOWN_PENDING:
314 		case SCTP_STATE_SHUTDOWN_SENT:
315 		case SCTP_STATE_SHUTDOWN_RECEIVED:
316 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
317 			/* Cannot send after transport endpoint shutdown */
318 			error = -ESHUTDOWN;
319 			break;
320 
321 		default:
322 			pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
323 				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
324 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
325 				 "illegal chunk");
326 
327 			sctp_chunk_hold(chunk);
328 			sctp_outq_tail_data(q, chunk);
329 			if (chunk->asoc->prsctp_enable &&
330 			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
331 				chunk->asoc->sent_cnt_removable++;
332 			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
333 				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
334 			else
335 				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
336 			break;
337 		}
338 	} else {
339 		list_add_tail(&chunk->list, &q->control_chunk_list);
340 		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
341 	}
342 
343 	if (error < 0)
344 		return error;
345 
346 	if (!q->cork)
347 		error = sctp_outq_flush(q, 0, gfp);
348 
349 	return error;
350 }
351 
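/* A minimal usage sketch (hedged; 'asoc' and 'chunk' stand for a valid
 * association and chunk, as in the sm_sideeffect.c callers):
 *
 *	error = sctp_outq_tail(&asoc->outqueue, chunk, GFP_ATOMIC);
 *
 * DATA chunks land on out_chunk_list, everything else on
 * control_chunk_list, and unless q->cork is set the queue is flushed
 * before returning.
 */
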
352 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
353  * and the abandoned list are in ascending order.
354  */
355 static void sctp_insert_list(struct list_head *head, struct list_head *new)
356 {
357 	struct list_head *pos;
358 	struct sctp_chunk *nchunk, *lchunk;
359 	__u32 ntsn, ltsn;
360 	int done = 0;
361 
362 	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
363 	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
364 
365 	list_for_each(pos, head) {
366 		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
367 		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
368 		if (TSN_lt(ntsn, ltsn)) {
369 			list_add(new, pos->prev);
370 			done = 1;
371 			break;
372 		}
373 	}
374 	if (!done)
375 		list_add_tail(new, head);
376 }
377 
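/* The ordering relies on TSN_lt(), i.e. serial number arithmetic, so the
 * sort stays correct across 32-bit TSN wraparound.  Illustrative values:
 * ntsn == 0x00000001 is ordered after ltsn == 0xfffffffe, because
 * TSN_lt(0xfffffffe, 0x00000001) holds even though a plain unsigned
 * comparison would say otherwise.
 */
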
378 static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
379 				  struct sctp_sndrcvinfo *sinfo,
380 				  struct list_head *queue, int msg_len)
381 {
382 	struct sctp_chunk *chk, *temp;
383 
384 	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
385 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
386 		    chk->prsctp_param <= sinfo->sinfo_timetolive)
387 			continue;
388 
389 		list_del_init(&chk->transmitted_list);
390 		sctp_insert_list(&asoc->outqueue.abandoned,
391 				 &chk->transmitted_list);
392 
393 		asoc->sent_cnt_removable--;
394 		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
395 
396 		if (!chk->tsn_gap_acked) {
397 			if (chk->transport)
398 				chk->transport->flight_size -=
399 						sctp_data_size(chk);
400 			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
401 		}
402 
403 		msg_len -= SCTP_DATA_SNDSIZE(chk) +
404 			   sizeof(struct sk_buff) +
405 			   sizeof(struct sctp_chunk);
406 		if (msg_len <= 0)
407 			break;
408 	}
409 
410 	return msg_len;
411 }
412 
413 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
414 				    struct sctp_sndrcvinfo *sinfo,
415 				    struct list_head *queue, int msg_len)
416 {
417 	struct sctp_chunk *chk, *temp;
418 
419 	list_for_each_entry_safe(chk, temp, queue, list) {
420 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
421 		    chk->prsctp_param <= sinfo->sinfo_timetolive)
422 			continue;
423 
424 		list_del_init(&chk->list);
425 		asoc->sent_cnt_removable--;
426 		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
427 
428 		msg_len -= SCTP_DATA_SNDSIZE(chk) +
429 			   sizeof(struct sk_buff) +
430 			   sizeof(struct sctp_chunk);
431 		sctp_chunk_free(chk);
432 		if (msg_len <= 0)
433 			break;
434 	}
435 
436 	return msg_len;
437 }
438 
439 /* Abandon the chunks according to their priorities. */
440 void sctp_prsctp_prune(struct sctp_association *asoc,
441 		       struct sctp_sndrcvinfo *sinfo, int msg_len)
442 {
443 	struct sctp_transport *transport;
444 
445 	if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)
446 		return;
447 
448 	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
449 					 &asoc->outqueue.retransmit,
450 					 msg_len);
451 	if (msg_len <= 0)
452 		return;
453 
454 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
455 			    transports) {
456 		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
457 						 &transport->transmitted,
458 						 msg_len);
459 		if (msg_len <= 0)
460 			return;
461 	}
462 
463 	sctp_prsctp_prune_unsent(asoc, sinfo,
464 				 &asoc->outqueue.out_chunk_list,
465 				 msg_len);
466 }
467 
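/* The pruning order above is deliberate: retransmit-queue chunks are
 * abandoned first, then per-transport transmitted chunks, and never-sent
 * chunks only last.  A hedged sketch of the sending-side call (assuming
 * a sendmsg-style caller that can compute the association's free write
 * space):
 *
 *	if (sctp_wspace(asoc) < msg_len)
 *		sctp_prsctp_prune(asoc, sinfo,
 *				  msg_len - sctp_wspace(asoc));
 */
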
468 /* Mark all the eligible packets on a transport for retransmission.  */
469 void sctp_retransmit_mark(struct sctp_outq *q,
470 			  struct sctp_transport *transport,
471 			  __u8 reason)
472 {
473 	struct list_head *lchunk, *ltemp;
474 	struct sctp_chunk *chunk;
475 
476 	/* Walk through the specified transmitted queue.  */
477 	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
478 		chunk = list_entry(lchunk, struct sctp_chunk,
479 				   transmitted_list);
480 
481 		/* If the chunk is abandoned, move it to abandoned list. */
482 		if (sctp_chunk_abandoned(chunk)) {
483 			list_del_init(lchunk);
484 			sctp_insert_list(&q->abandoned, lchunk);
485 
486 			/* If this chunk has not been previously acked,
487 			 * stop considering it 'outstanding'.  Our peer
488 			 * will most likely never see it since it will
489 			 * not be retransmitted.
490 			 */
491 			if (!chunk->tsn_gap_acked) {
492 				if (chunk->transport)
493 					chunk->transport->flight_size -=
494 							sctp_data_size(chunk);
495 				q->outstanding_bytes -= sctp_data_size(chunk);
496 				q->asoc->peer.rwnd += sctp_data_size(chunk);
497 			}
498 			continue;
499 		}
500 
501 		/* If we are doing retransmission due to a timeout or pmtu
502 		 * discovery, only the chunks that are not yet acked should
503 		 * be added to the retransmit queue.
504 		 */
505 		if ((reason == SCTP_RTXR_FAST_RTX  &&
506 			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
507 		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
508 			/* RFC 2960 6.2.1 Processing a Received SACK
509 			 *
510 			 * C) Any time a DATA chunk is marked for
511 			 * retransmission (via either T3-rtx timer expiration
512 			 * (Section 6.3.3) or via fast retransmit
513 			 * (Section 7.2.4)), add the data size of those
514 			 * chunks to the rwnd.
515 			 */
516 			q->asoc->peer.rwnd += sctp_data_size(chunk);
517 			q->outstanding_bytes -= sctp_data_size(chunk);
518 			if (chunk->transport)
519 				transport->flight_size -= sctp_data_size(chunk);
520 
521 			/* sctpimpguide-05 Section 2.8.2
522 			 * M5) If a T3-rtx timer expires, the
523 			 * 'TSN.Missing.Report' of all affected TSNs is set
524 			 * to 0.
525 			 */
526 			chunk->tsn_missing_report = 0;
527 
528 			/* If a chunk that is being used for RTT measurement
529 			 * has to be retransmitted, we cannot use this chunk
530 			 * anymore for RTT measurements. Reset rto_pending so
531 			 * that a new RTT measurement is started when a new
532 			 * data chunk is sent.
533 			 */
534 			if (chunk->rtt_in_progress) {
535 				chunk->rtt_in_progress = 0;
536 				transport->rto_pending = 0;
537 			}
538 
539 			chunk->resent = 1;
540 
541 			/* Move the chunk to the retransmit queue. The chunks
542 			 * on the retransmit queue are always kept in order.
543 			 */
544 			list_del_init(lchunk);
545 			sctp_insert_list(&q->retransmit, lchunk);
546 		}
547 	}
548 
549 	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
550 		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
551 		 transport->cwnd, transport->ssthresh, transport->flight_size,
552 		 transport->partial_bytes_acked);
553 }
554 
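/* Byte accounting example (illustrative sizes): marking a 500-byte,
 * not-yet-acked DATA chunk for T3-rtx retransmission grows
 * asoc->peer.rwnd by 500 and shrinks both q->outstanding_bytes and the
 * transport's flight_size by 500, per RFC 2960 6.2.1 C) quoted above.
 */
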
555 /* Mark all the eligible packets on a transport for retransmission and force
556  * one packet out.
557  */
558 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
559 		     sctp_retransmit_reason_t reason)
560 {
561 	struct net *net = sock_net(q->asoc->base.sk);
562 	int error = 0;
563 
564 	switch (reason) {
565 	case SCTP_RTXR_T3_RTX:
566 		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
567 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
568 		/* Update the retran path if the T3-rtx timer has expired for
569 		 * the current retran path.
570 		 */
571 		if (transport == transport->asoc->peer.retran_path)
572 			sctp_assoc_update_retran_path(transport->asoc);
573 		transport->asoc->rtx_data_chunks +=
574 			transport->asoc->unack_data;
575 		break;
576 	case SCTP_RTXR_FAST_RTX:
577 		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
578 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
579 		q->fast_rtx = 1;
580 		break;
581 	case SCTP_RTXR_PMTUD:
582 		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
583 		break;
584 	case SCTP_RTXR_T1_RTX:
585 		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
586 		transport->asoc->init_retries++;
587 		break;
588 	default:
589 		BUG();
590 	}
591 
592 	sctp_retransmit_mark(q, transport, reason);
593 
594 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
595 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
596 	 * following the procedures outlined in C1 - C5.
597 	 */
598 	if (reason == SCTP_RTXR_T3_RTX)
599 		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
600 
601 	/* Flush the queues only on timeout, since fast_rtx is only
602 	 * triggered during sack processing and the queue
603 	 * will be flushed at the end.
604 	 */
605 	if (reason != SCTP_RTXR_FAST_RTX)
606 		error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
607 
608 	if (error)
609 		q->asoc->base.sk->sk_err = -error;
610 }
611 
612 /*
613  * Transmit DATA chunks on the retransmit queue.  Upon return from
614  * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
615  * need to be transmitted by the caller.
616  * We assume that pkt->transport has already been set.
617  *
618  * The return value is a normal kernel error return value.
619  */
620 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
621 			       int rtx_timeout, int *start_timer)
622 {
623 	struct list_head *lqueue;
624 	struct sctp_transport *transport = pkt->transport;
625 	sctp_xmit_t status;
626 	struct sctp_chunk *chunk, *chunk1;
627 	int fast_rtx;
628 	int error = 0;
629 	int timer = 0;
630 	int done = 0;
631 
632 	lqueue = &q->retransmit;
633 	fast_rtx = q->fast_rtx;
634 
635 	/* This loop handles time-out retransmissions, fast retransmissions,
636 	 * and retransmissions due to the opening of the window.
637 	 *
638 	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
639 	 *
640 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
641 	 * outstanding DATA chunks for the address for which the
642 	 * T3-rtx has expired will fit into a single packet, subject
643 	 * to the MTU constraint for the path corresponding to the
644 	 * destination transport address to which the retransmission
645 	 * is being sent (this may be different from the address for
646 	 * which the timer expires [see Section 6.4]). Call this value
647 	 * K. Bundle and retransmit those K DATA chunks in a single
648 	 * packet to the destination endpoint.
649 	 *
650 	 * [Just to be painfully clear, if we are retransmitting
651 	 * because a timeout just happened, we should send only ONE
652 	 * packet of retransmitted data.]
653 	 *
654 	 * For fast retransmissions we also send only ONE packet.  However,
655 	 * if we are just flushing the queue due to open window, we'll
656 	 * try to send as much as possible.
657 	 */
658 	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
659 		/* If the chunk is abandoned, move it to abandoned list. */
660 		if (sctp_chunk_abandoned(chunk)) {
661 			list_del_init(&chunk->transmitted_list);
662 			sctp_insert_list(&q->abandoned,
663 					 &chunk->transmitted_list);
664 			continue;
665 		}
666 
667 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
668 		 * simple approach is just to move such TSNs out of the
669 		 * way and into a 'transmitted' queue and skip to the
670 		 * next chunk.
671 		 */
672 		if (chunk->tsn_gap_acked) {
673 			list_move_tail(&chunk->transmitted_list,
674 				       &transport->transmitted);
675 			continue;
676 		}
677 
678 		/* If we are doing fast retransmit, ignore non-fast_retransmit
679 		 * chunks.
680 		 */
681 		if (fast_rtx && !chunk->fast_retransmit)
682 			continue;
683 
684 redo:
685 		/* Attempt to append this chunk to the packet. */
686 		status = sctp_packet_append_chunk(pkt, chunk);
687 
688 		switch (status) {
689 		case SCTP_XMIT_PMTU_FULL:
690 			if (!pkt->has_data && !pkt->has_cookie_echo) {
691 				/* If this packet did not contain DATA then
692 				 * retransmission did not happen, so do it
693 				 * again.  We'll ignore the error here since
694 				 * control chunks are already freed so there
695 				 * is nothing we can do.
696 				 */
697 				sctp_packet_transmit(pkt, GFP_ATOMIC);
698 				goto redo;
699 			}
700 
701 			/* Send this packet.  */
702 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
703 
704 			/* If we are retransmitting, we should only
705 			 * send a single packet.
706 			 * Otherwise, try appending this chunk again.
707 			 */
708 			if (rtx_timeout || fast_rtx)
709 				done = 1;
710 			else
711 				goto redo;
712 
713 			/* Bundle next chunk in the next round.  */
714 			break;
715 
716 		case SCTP_XMIT_RWND_FULL:
717 			/* Send this packet. */
718 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
719 
720 			/* Stop sending DATA as there is no more room
721 			 * at the receiver.
722 			 */
723 			done = 1;
724 			break;
725 
726 		case SCTP_XMIT_DELAY:
727 			/* Send this packet. */
728 			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
729 
730 			/* Stop sending DATA because of nagle delay. */
731 			done = 1;
732 			break;
733 
734 		default:
735 			/* The append was successful, so add this chunk to
736 			 * the transmitted list.
737 			 */
738 			list_move_tail(&chunk->transmitted_list,
739 				       &transport->transmitted);
740 
741 			/* Mark the chunk as ineligible for fast retransmit
742 			 * after it is retransmitted.
743 			 */
744 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
745 				chunk->fast_retransmit = SCTP_DONT_FRTX;
746 
747 			q->asoc->stats.rtxchunks++;
748 			break;
749 		}
750 
751 		/* Set the timer if there were no errors */
752 		if (!error && !timer)
753 			timer = 1;
754 
755 		if (done)
756 			break;
757 	}
758 
759 	/* If we are here due to a retransmit timeout or a fast
760 	 * retransmit and if there are any chunks left in the retransmit
761 	 * queue that could not fit in the PMTU sized packet, they need
762 	 * to be marked as ineligible for a subsequent fast retransmit.
763 	 */
764 	if (rtx_timeout || fast_rtx) {
765 		list_for_each_entry(chunk1, lqueue, transmitted_list) {
766 			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
767 				chunk1->fast_retransmit = SCTP_DONT_FRTX;
768 		}
769 	}
770 
771 	*start_timer = timer;
772 
773 	/* Clear fast retransmit hint */
774 	if (fast_rtx)
775 		q->fast_rtx = 0;
776 
777 	return error;
778 }
779 
780 /* Uncork the outqueue: clear the cork and flush any chunks queued while it was corked. */
781 int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
782 {
783 	if (q->cork)
784 		q->cork = 0;
785 
786 	return sctp_outq_flush(q, 0, gfp);
787 }
788 
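/* A hedged sketch of the cork/uncork pattern (simplified; in the kernel
 * the cork flag is managed by the side-effect interpreter rather than
 * set by hand):
 *
 *	q->cork = 1;
 *	sctp_outq_tail(q, chunk1, gfp);   -- queued only
 *	sctp_outq_tail(q, chunk2, gfp);   -- queued only
 *	error = sctp_outq_uncork(q, gfp); -- one flush for both chunks
 */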
789 
790 /*
791  * Try to flush an outqueue.
792  *
793  * Description: Send everything in q which we legally can, subject to
794  * congestion limitations.
795  * Note: This function can be called from multiple contexts, so appropriate
796  * locking must be observed.  Today we use the sock lock to protect
797  * this function.
798  */
799 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
800 {
801 	struct sctp_packet *packet;
802 	struct sctp_packet singleton;
803 	struct sctp_association *asoc = q->asoc;
804 	__u16 sport = asoc->base.bind_addr.port;
805 	__u16 dport = asoc->peer.port;
806 	__u32 vtag = asoc->peer.i.init_tag;
807 	struct sctp_transport *transport = NULL;
808 	struct sctp_transport *new_transport;
809 	struct sctp_chunk *chunk, *tmp;
810 	sctp_xmit_t status;
811 	int error = 0;
812 	int start_timer = 0;
813 	int one_packet = 0;
814 
815 	/* These transports have chunks to send. */
816 	struct list_head transport_list;
817 	struct list_head *ltransport;
818 
819 	INIT_LIST_HEAD(&transport_list);
820 	packet = NULL;
821 
822 	/*
823 	 * 6.10 Bundling
824 	 *   ...
825 	 *   When bundling control chunks with DATA chunks, an
826 	 *   endpoint MUST place control chunks first in the outbound
827 	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
828 	 *   within a SCTP packet in increasing order of TSN.
829 	 *   ...
830 	 */
831 
832 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
833 		/* RFC 5061, 5.3
834 		 * F1) This means that until such time as the ASCONF
835 		 * containing the add is acknowledged, the sender MUST
836 		 * NOT use the new IP address as a source for ANY SCTP
837 		 * packet except on carrying an ASCONF Chunk.
838 		 */
839 		if (asoc->src_out_of_asoc_ok &&
840 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
841 			continue;
842 
843 		list_del_init(&chunk->list);
844 
845 		/* Pick the right transport to use. */
846 		new_transport = chunk->transport;
847 
848 		if (!new_transport) {
849 			/*
850 			 * If we have a prior transport pointer, see if
851 			 * the destination address of the chunk
852 			 * matches the destination address of the
853 			 * current transport.  If not a match, then
854 			 * try to look up the transport with a given
855 			 * destination address.  We do this because
856 			 * after processing ASCONFs, we may have new
857 			 * transports created.
858 			 */
859 			if (transport &&
860 			    sctp_cmp_addr_exact(&chunk->dest,
861 						&transport->ipaddr))
862 					new_transport = transport;
863 			else
864 				new_transport = sctp_assoc_lookup_paddr(asoc,
865 								&chunk->dest);
866 
867 			/* if we still don't have a new transport, then
868 			 * use the current active path.
869 			 */
870 			if (!new_transport)
871 				new_transport = asoc->peer.active_path;
872 		} else if ((new_transport->state == SCTP_INACTIVE) ||
873 			   (new_transport->state == SCTP_UNCONFIRMED) ||
874 			   (new_transport->state == SCTP_PF)) {
875 			/* If the chunk is Heartbeat or Heartbeat Ack,
876 			 * send it to chunk->transport, even if it's
877 			 * inactive.
878 			 *
879 			 * 3.3.6 Heartbeat Acknowledgement:
880 			 * ...
881 			 * A HEARTBEAT ACK is always sent to the source IP
882 			 * address of the IP datagram containing the
883 			 * HEARTBEAT chunk to which this ack is responding.
884 			 * ...
885 			 *
886 			 * ASCONF_ACKs also must be sent to the source.
887 			 */
888 			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
889 			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
890 			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
891 				new_transport = asoc->peer.active_path;
892 		}
893 
894 		/* Are we switching transports?
895 		 * Take care of transport locks.
896 		 */
897 		if (new_transport != transport) {
898 			transport = new_transport;
899 			if (list_empty(&transport->send_ready)) {
900 				list_add_tail(&transport->send_ready,
901 					      &transport_list);
902 			}
903 			packet = &transport->packet;
904 			sctp_packet_config(packet, vtag,
905 					   asoc->peer.ecn_capable);
906 		}
907 
908 		switch (chunk->chunk_hdr->type) {
909 		/*
910 		 * 6.10 Bundling
911 		 *   ...
912 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
913 		 *   COMPLETE with any other chunks.  [Send them immediately.]
914 		 */
915 		case SCTP_CID_INIT:
916 		case SCTP_CID_INIT_ACK:
917 		case SCTP_CID_SHUTDOWN_COMPLETE:
918 			sctp_packet_init(&singleton, transport, sport, dport);
919 			sctp_packet_config(&singleton, vtag, 0);
920 			sctp_packet_append_chunk(&singleton, chunk);
921 			error = sctp_packet_transmit(&singleton, gfp);
922 			if (error < 0)
923 				return error;
924 			break;
925 
926 		case SCTP_CID_ABORT:
927 			if (sctp_test_T_bit(chunk)) {
928 				packet->vtag = asoc->c.my_vtag;
929 			}
930 		/* The following chunks are "response" chunks, i.e.
931 		 * they are generated in response to something we
932 		 * received.  If we are sending these, then we can
933 		 * send only 1 packet containing these chunks.
934 		 */
935 		case SCTP_CID_HEARTBEAT_ACK:
936 		case SCTP_CID_SHUTDOWN_ACK:
937 		case SCTP_CID_COOKIE_ACK:
938 		case SCTP_CID_COOKIE_ECHO:
939 		case SCTP_CID_ERROR:
940 		case SCTP_CID_ECN_CWR:
941 		case SCTP_CID_ASCONF_ACK:
942 			one_packet = 1;
943 			/* Fall through */
944 
945 		case SCTP_CID_SACK:
946 		case SCTP_CID_HEARTBEAT:
947 		case SCTP_CID_SHUTDOWN:
948 		case SCTP_CID_ECN_ECNE:
949 		case SCTP_CID_ASCONF:
950 		case SCTP_CID_FWD_TSN:
951 			status = sctp_packet_transmit_chunk(packet, chunk,
952 							    one_packet, gfp);
953 			if (status != SCTP_XMIT_OK) {
954 				/* put the chunk back */
955 				list_add(&chunk->list, &q->control_chunk_list);
956 			} else {
957 				asoc->stats.octrlchunks++;
958 				/* PR-SCTP C5) If a FORWARD TSN is sent, the
959 				 * sender MUST assure that at least one T3-rtx
960 				 * timer is running.
961 				 */
962 				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
963 					sctp_transport_reset_t3_rtx(transport);
964 					transport->last_time_sent = jiffies;
965 				}
966 			}
967 			break;
968 
969 		default:
970 			/* We built a chunk with an illegal type! */
971 			BUG();
972 		}
973 	}
974 
975 	if (q->asoc->src_out_of_asoc_ok)
976 		goto sctp_flush_out;
977 
978 	/* Is it OK to send data chunks?  */
979 	switch (asoc->state) {
980 	case SCTP_STATE_COOKIE_ECHOED:
981 		/* Only allow bundling when this packet has a COOKIE-ECHO
982 		 * chunk.
983 		 */
984 		if (!packet || !packet->has_cookie_echo)
985 			break;
986 
987 		/* fallthru */
988 	case SCTP_STATE_ESTABLISHED:
989 	case SCTP_STATE_SHUTDOWN_PENDING:
990 	case SCTP_STATE_SHUTDOWN_RECEIVED:
991 		/*
992 		 * RFC 2960 6.1  Transmission of DATA Chunks
993 		 *
994 		 * C) When the time comes for the sender to transmit,
995 		 * before sending new DATA chunks, the sender MUST
996 		 * first transmit any outstanding DATA chunks which
997 		 * are marked for retransmission (limited by the
998 		 * current cwnd).
999 		 */
1000 		if (!list_empty(&q->retransmit)) {
1001 			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
1002 				goto sctp_flush_out;
1003 			if (transport == asoc->peer.retran_path)
1004 				goto retran;
1005 
1006 			/* Switch transports & prepare the packet.  */
1007 
1008 			transport = asoc->peer.retran_path;
1009 
1010 			if (list_empty(&transport->send_ready)) {
1011 				list_add_tail(&transport->send_ready,
1012 					      &transport_list);
1013 			}
1014 
1015 			packet = &transport->packet;
1016 			sctp_packet_config(packet, vtag,
1017 					   asoc->peer.ecn_capable);
1018 		retran:
1019 			error = sctp_outq_flush_rtx(q, packet,
1020 						    rtx_timeout, &start_timer);
1021 
1022 			if (start_timer) {
1023 				sctp_transport_reset_t3_rtx(transport);
1024 				transport->last_time_sent = jiffies;
1025 			}
1026 
1027 			/* This can happen on COOKIE-ECHO resend.  Only
1028 			 * one chunk can get bundled with a COOKIE-ECHO.
1029 			 */
1030 			if (packet->has_cookie_echo)
1031 				goto sctp_flush_out;
1032 
1033 			/* Don't send new data if there is still data
1034 			 * waiting to retransmit.
1035 			 */
1036 			if (!list_empty(&q->retransmit))
1037 				goto sctp_flush_out;
1038 		}
1039 
1040 		/* Apply Max.Burst limitation to the current transport in
1041 		 * case it will be used for new data.  We are going to
1042 		 * rest it before we return, but we want to apply the limit
1043 		 * reset it before we return, but we want to apply the limit
1044 		 */
1045 		if (transport)
1046 			sctp_transport_burst_limited(transport);
1047 
1048 		/* Finally, transmit new packets.  */
1049 		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
1050 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
1051 			 * stream identifier.
1052 			 */
1053 			if (chunk->sinfo.sinfo_stream >=
1054 			    asoc->c.sinit_num_ostreams) {
1055 
1056 				/* Mark as failed send. */
1057 				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
1058 				if (asoc->prsctp_enable &&
1059 				    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
1060 					asoc->sent_cnt_removable--;
1061 				sctp_chunk_free(chunk);
1062 				continue;
1063 			}
1064 
1065 			/* Has this chunk expired? */
1066 			if (sctp_chunk_abandoned(chunk)) {
1067 				sctp_chunk_fail(chunk, 0);
1068 				sctp_chunk_free(chunk);
1069 				continue;
1070 			}
1071 
1072 			/* If there is a specified transport, use it.
1073 			 * Otherwise, we want to use the active path.
1074 			 */
1075 			new_transport = chunk->transport;
1076 			if (!new_transport ||
1077 			    ((new_transport->state == SCTP_INACTIVE) ||
1078 			     (new_transport->state == SCTP_UNCONFIRMED) ||
1079 			     (new_transport->state == SCTP_PF)))
1080 				new_transport = asoc->peer.active_path;
1081 			if (new_transport->state == SCTP_UNCONFIRMED) {
1082 				WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
1083 				sctp_chunk_fail(chunk, 0);
1084 				sctp_chunk_free(chunk);
1085 				continue;
1086 			}
1087 
1088 			/* Change packets if necessary.  */
1089 			if (new_transport != transport) {
1090 				transport = new_transport;
1091 
1092 				/* Schedule to have this transport's
1093 				 * packet flushed.
1094 				 */
1095 				if (list_empty(&transport->send_ready)) {
1096 					list_add_tail(&transport->send_ready,
1097 						      &transport_list);
1098 				}
1099 
1100 				packet = &transport->packet;
1101 				sctp_packet_config(packet, vtag,
1102 						   asoc->peer.ecn_capable);
1103 				/* We've switched transports, so apply the
1104 				 * Burst limit to the new transport.
1105 				 */
1106 				sctp_transport_burst_limited(transport);
1107 			}
1108 
1109 			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
1110 				 "skb->users:%d\n",
1111 				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
1112 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
1113 				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
1114 				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
1115 				 atomic_read(&chunk->skb->users) : -1);
1116 
1117 			/* Add the chunk to the packet.  */
1118 			status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
1119 
1120 			switch (status) {
1121 			case SCTP_XMIT_PMTU_FULL:
1122 			case SCTP_XMIT_RWND_FULL:
1123 			case SCTP_XMIT_DELAY:
1124 				/* We could not append this chunk, so put
1125 				 * the chunk back on the output queue.
1126 				 */
1127 				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
1128 					 __func__, ntohl(chunk->subh.data_hdr->tsn),
1129 					 status);
1130 
1131 				sctp_outq_head_data(q, chunk);
1132 				goto sctp_flush_out;
1133 
1134 			case SCTP_XMIT_OK:
1135 				/* If the sender is in the SHUTDOWN-PENDING state,
1136 				 * it MAY set the I-bit in the DATA
1137 				 * chunk header.
1138 				 */
1139 				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1140 					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1141 				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1142 					asoc->stats.ouodchunks++;
1143 				else
1144 					asoc->stats.oodchunks++;
1145 
1146 				break;
1147 
1148 			default:
1149 				BUG();
1150 			}
1151 
1152 			/* BUG: We assume that the sctp_packet_transmit()
1153 			 * call below will succeed all the time and add the
1154 			 * chunk to the transmitted list and restart the
1155 			 * timers.
1156 			 * It is possible that the call can fail under OOM
1157 			 * conditions.
1158 			 *
1159 			 * Is this really a problem?  Won't this behave
1160 			 * like a lost TSN?
1161 			 */
1162 			list_add_tail(&chunk->transmitted_list,
1163 				      &transport->transmitted);
1164 
1165 			sctp_transport_reset_t3_rtx(transport);
1166 			transport->last_time_sent = jiffies;
1167 
1168 			/* Only let one DATA chunk get bundled with a
1169 			 * COOKIE-ECHO chunk.
1170 			 */
1171 			if (packet->has_cookie_echo)
1172 				goto sctp_flush_out;
1173 		}
1174 		break;
1175 
1176 	default:
1177 		/* Do nothing.  */
1178 		break;
1179 	}
1180 
1181 sctp_flush_out:
1182 
1183 	/* Before returning, examine all the transports touched in
1184 	 * this call.  Right now, we bluntly force clear all the
1185 	 * transports.  Things might change after we implement Nagle.
1186 	 * But such an examination is still required.
1187 	 *
1188 	 * --xguo
1189 	 */
1190 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
1191 		struct sctp_transport *t = list_entry(ltransport,
1192 						      struct sctp_transport,
1193 						      send_ready);
1194 		packet = &t->packet;
1195 		if (!sctp_packet_empty(packet))
1196 			error = sctp_packet_transmit(packet, gfp);
1197 
1198 		/* Clear the burst limited state, if any */
1199 		sctp_transport_burst_reset(t);
1200 	}
1201 
1202 	return error;
1203 }
1204 
1205 /* Update unack_data based on the incoming SACK chunk */
1206 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1207 					struct sctp_sackhdr *sack)
1208 {
1209 	sctp_sack_variable_t *frags;
1210 	__u16 unack_data;
1211 	int i;
1212 
1213 	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1214 
1215 	frags = sack->variable;
1216 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1217 		unack_data -= ((ntohs(frags[i].gab.end) -
1218 				ntohs(frags[i].gab.start) + 1));
1219 	}
1220 
1221 	assoc->unack_data = unack_data;
1222 }
1223 
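/* Worked example (illustrative numbers): next_tsn == 110 and
 * ctsn_ack_point == 99 leave 10 TSNs (100..109) unacked by the
 * cumulative ack.  One gap ack block with start == 3, end == 5 covers
 * TSNs 103..105 and removes 3 more, so unack_data becomes 7.
 */
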
1224 /* This is where we REALLY process a SACK.
1225  *
1226  * Process the SACK against the outqueue.  Mostly, this just frees
1227  * things off the transmitted queue.
1228  */
1229 int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1230 {
1231 	struct sctp_association *asoc = q->asoc;
1232 	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
1233 	struct sctp_transport *transport;
1234 	struct sctp_chunk *tchunk = NULL;
1235 	struct list_head *lchunk, *transport_list, *temp;
1236 	sctp_sack_variable_t *frags = sack->variable;
1237 	__u32 sack_ctsn, ctsn, tsn;
1238 	__u32 highest_tsn, highest_new_tsn;
1239 	__u32 sack_a_rwnd;
1240 	unsigned int outstanding;
1241 	struct sctp_transport *primary = asoc->peer.primary_path;
1242 	int count_of_newacks = 0;
1243 	int gap_ack_blocks;
1244 	u8 accum_moved = 0;
1245 
1246 	/* Grab the association's destination address list. */
1247 	transport_list = &asoc->peer.transport_addr_list;
1248 
1249 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1250 	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1251 	asoc->stats.gapcnt += gap_ack_blocks;
1252 	/*
1253 	 * SFR-CACC algorithm:
1254 	 * On receipt of a SACK the sender SHOULD execute the
1255 	 * following statements.
1256 	 *
1257 	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1258 	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1259 	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1260 	 * all destinations.
1261 	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1262 	 * is set the receiver of the SACK MUST take the following actions:
1263 	 *
1264 	 * A) Initialize the cacc_saw_newack to 0 for all destination
1265 	 * addresses.
1266 	 *
1267 	 * Only bother if changeover_active is set. Otherwise, this is
1268 	 * totally suboptimal to do on every SACK.
1269 	 */
1270 	if (primary->cacc.changeover_active) {
1271 		u8 clear_cycling = 0;
1272 
1273 		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1274 			primary->cacc.changeover_active = 0;
1275 			clear_cycling = 1;
1276 		}
1277 
1278 		if (clear_cycling || gap_ack_blocks) {
1279 			list_for_each_entry(transport, transport_list,
1280 					transports) {
1281 				if (clear_cycling)
1282 					transport->cacc.cycling_changeover = 0;
1283 				if (gap_ack_blocks)
1284 					transport->cacc.cacc_saw_newack = 0;
1285 			}
1286 		}
1287 	}
1288 
1289 	/* Get the highest TSN in the sack. */
1290 	highest_tsn = sack_ctsn;
1291 	if (gap_ack_blocks)
1292 		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1293 
1294 	if (TSN_lt(asoc->highest_sacked, highest_tsn))
1295 		asoc->highest_sacked = highest_tsn;
1296 
1297 	highest_new_tsn = sack_ctsn;
1298 
1299 	/* Run through the retransmit queue.  Credit bytes received
1300 	 * and free those chunks that we can.
1301 	 */
1302 	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
1303 
1304 	/* Run through the transmitted queue.
1305 	 * Credit bytes received and free those chunks which we can.
1306 	 *
1307 	 * This is a MASSIVE candidate for optimization.
1308 	 */
1309 	list_for_each_entry(transport, transport_list, transports) {
1310 		sctp_check_transmitted(q, &transport->transmitted,
1311 				       transport, &chunk->source, sack,
1312 				       &highest_new_tsn);
1313 		/*
1314 		 * SFR-CACC algorithm:
1315 		 * C) Let count_of_newacks be the number of
1316 		 * destinations for which cacc_saw_newack is set.
1317 		 */
1318 		if (transport->cacc.cacc_saw_newack)
1319 			count_of_newacks++;
1320 	}
1321 
1322 	/* Move the Cumulative TSN Ack Point if appropriate.  */
1323 	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1324 		asoc->ctsn_ack_point = sack_ctsn;
1325 		accum_moved = 1;
1326 	}
1327 
1328 	if (gap_ack_blocks) {
1329 
1330 		if (asoc->fast_recovery && accum_moved)
1331 			highest_new_tsn = highest_tsn;
1332 
1333 		list_for_each_entry(transport, transport_list, transports)
1334 			sctp_mark_missing(q, &transport->transmitted, transport,
1335 					  highest_new_tsn, count_of_newacks);
1336 	}
1337 
1338 	/* Update unack_data field in the assoc. */
1339 	sctp_sack_update_unack_data(asoc, sack);
1340 
1341 	ctsn = asoc->ctsn_ack_point;
1342 
1343 	/* Throw away stuff rotting on the sack queue.  */
1344 	list_for_each_safe(lchunk, temp, &q->sacked) {
1345 		tchunk = list_entry(lchunk, struct sctp_chunk,
1346 				    transmitted_list);
1347 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1348 		if (TSN_lte(tsn, ctsn)) {
1349 			list_del_init(&tchunk->transmitted_list);
1350 			if (asoc->prsctp_enable &&
1351 			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
1352 				asoc->sent_cnt_removable--;
1353 			sctp_chunk_free(tchunk);
1354 		}
1355 	}
1356 
1357 	/* ii) Set rwnd equal to the newly received a_rwnd minus the
1358 	 *     number of bytes still outstanding after processing the
1359 	 *     Cumulative TSN Ack and the Gap Ack Blocks.
1360 	 */
1361 
1362 	sack_a_rwnd = ntohl(sack->a_rwnd);
1363 	asoc->peer.zero_window_announced = !sack_a_rwnd;
1364 	outstanding = q->outstanding_bytes;
1365 
1366 	if (outstanding < sack_a_rwnd)
1367 		sack_a_rwnd -= outstanding;
1368 	else
1369 		sack_a_rwnd = 0;
1370 
1371 	asoc->peer.rwnd = sack_a_rwnd;
1372 
1373 	sctp_generate_fwdtsn(q, sack_ctsn);
1374 
1375 	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
1376 	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
1377 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
1378 		 asoc->adv_peer_ack_point);
1379 
1380 	return sctp_outq_is_empty(q);
1381 }
1382 
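/* rwnd bookkeeping example (illustrative numbers): a_rwnd == 65535 with
 * 1500 bytes still outstanding yields peer.rwnd == 64035.  If
 * outstanding exceeded a_rwnd, peer.rwnd would be clamped to 0; note
 * that zero_window_announced is derived from the raw a_rwnd, before the
 * subtraction.
 */
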
1383 /* Is the outqueue empty?
1384  * The queue is empty when there is no pending data, no in-flight data,
1385  * and no pending retransmissions.
1386  */
1387 int sctp_outq_is_empty(const struct sctp_outq *q)
1388 {
1389 	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1390 	       list_empty(&q->retransmit);
1391 }
1392 
1393 /********************************************************************
1394  * 2nd Level Abstractions
1395  ********************************************************************/
1396 
1397 /* Go through a transport's transmitted list or the association's retransmit
1398  * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1399  * The retransmit list will not have an associated transport.
1400  *
1401  * I added coherent debug information output.	--xguo
1402  *
1403  * Instead of printing 'sacked' or 'kept' for each TSN on the
1404  * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1405  * KEPT TSN6-TSN7, etc.
1406  */
1407 static void sctp_check_transmitted(struct sctp_outq *q,
1408 				   struct list_head *transmitted_queue,
1409 				   struct sctp_transport *transport,
1410 				   union sctp_addr *saddr,
1411 				   struct sctp_sackhdr *sack,
1412 				   __u32 *highest_new_tsn_in_sack)
1413 {
1414 	struct list_head *lchunk;
1415 	struct sctp_chunk *tchunk;
1416 	struct list_head tlist;
1417 	__u32 tsn;
1418 	__u32 sack_ctsn;
1419 	__u32 rtt;
1420 	__u8 restart_timer = 0;
1421 	int bytes_acked = 0;
1422 	int migrate_bytes = 0;
1423 	bool forward_progress = false;
1424 
1425 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1426 
1427 	INIT_LIST_HEAD(&tlist);
1428 
1429 	/* The while loop will skip empty transmitted queues. */
1430 	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1431 		tchunk = list_entry(lchunk, struct sctp_chunk,
1432 				    transmitted_list);
1433 
1434 		if (sctp_chunk_abandoned(tchunk)) {
1435 			/* Move the chunk to abandoned list. */
1436 			sctp_insert_list(&q->abandoned, lchunk);
1437 
1438 			/* If this chunk has not been acked, stop
1439 			 * considering it as 'outstanding'.
1440 			 */
1441 			if (!tchunk->tsn_gap_acked) {
1442 				if (tchunk->transport)
1443 					tchunk->transport->flight_size -=
1444 							sctp_data_size(tchunk);
1445 				q->outstanding_bytes -= sctp_data_size(tchunk);
1446 			}
1447 			continue;
1448 		}
1449 
1450 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1451 		if (sctp_acked(sack, tsn)) {
1452 			/* If this queue is the retransmit queue, the
1453 			 * retransmit timer has already reclaimed
1454 			 * the outstanding bytes for this chunk, so only
1455 			 * count bytes associated with a transport.
1456 			 */
1457 			if (transport) {
1458 				/* If this chunk is being used for RTT
1459 				 * measurement, calculate the RTT and update
1460 				 * the RTO using this value.
1461 				 *
1462 				 * 6.3.1 C5) Karn's algorithm: RTT measurements
1463 				 * MUST NOT be made using packets that were
1464 				 * retransmitted (and thus for which it is
1465 				 * ambiguous whether the reply was for the
1466 				 * first instance of the packet or a later
1467 				 * instance).
1468 				 */
1469 				if (!tchunk->tsn_gap_acked &&
1470 				    !tchunk->resent &&
1471 				    tchunk->rtt_in_progress) {
1472 					tchunk->rtt_in_progress = 0;
1473 					rtt = jiffies - tchunk->sent_at;
1474 					sctp_transport_update_rto(transport,
1475 								  rtt);
1476 				}
1477 			}
1478 
1479 			/* If the chunk hasn't been marked as ACKED,
1480 			 * mark it and account bytes_acked if the
1481 			 * chunk had a valid transport (it will not
1482 			 * have a transport if ASCONF had deleted it
1483 			 * while DATA was outstanding).
1484 			 */
1485 			if (!tchunk->tsn_gap_acked) {
1486 				tchunk->tsn_gap_acked = 1;
1487 				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1488 					*highest_new_tsn_in_sack = tsn;
1489 				bytes_acked += sctp_data_size(tchunk);
1490 				if (!tchunk->transport)
1491 					migrate_bytes += sctp_data_size(tchunk);
1492 				forward_progress = true;
1493 			}
1494 
1495 			if (TSN_lte(tsn, sack_ctsn)) {
1496 				/* RFC 2960  6.3.2 Retransmission Timer Rules
1497 				 *
1498 				 * R3) Whenever a SACK is received
1499 				 * that acknowledges the DATA chunk
1500 				 * with the earliest outstanding TSN
1501 				 * for that address, restart T3-rtx
1502 				 * timer for that address with its
1503 				 * current RTO.
1504 				 */
1505 				restart_timer = 1;
1506 				forward_progress = true;
1507 
1508 				if (!tchunk->tsn_gap_acked) {
1509 					/*
1510 					 * SFR-CACC algorithm:
1511 					 * 2) If the SACK contains gap acks
1512 					 * and the flag CHANGEOVER_ACTIVE is
1513 					 * set the receiver of the SACK MUST
1514 					 * take the following action:
1515 					 *
1516 					 * B) For each TSN t being acked that
1517 					 * has not been acked in any SACK so
1518 					 * far, set cacc_saw_newack to 1 for
1519 					 * the destination that the TSN was
1520 					 * sent to.
1521 					 */
1522 					if (transport &&
1523 					    sack->num_gap_ack_blocks &&
1524 					    q->asoc->peer.primary_path->cacc.
1525 					    changeover_active)
1526 						transport->cacc.cacc_saw_newack
1527 							= 1;
1528 				}
1529 
1530 				list_add_tail(&tchunk->transmitted_list,
1531 					      &q->sacked);
1532 			} else {
1533 				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1534 				 * M2) Each time a SACK arrives reporting
1535 				 * 'Stray DATA chunk(s)' record the highest TSN
1536 				 * reported as newly acknowledged, call this
1537 				 * value 'HighestTSNinSack'. A newly
1538 				 * acknowledged DATA chunk is one not
1539 				 * previously acknowledged in a SACK.
1540 				 *
1541 				 * When the SCTP sender of data receives a SACK
1542 				 * chunk that acknowledges, for the first time,
1543 				 * the receipt of a DATA chunk, all the still
1544 				 * unacknowledged DATA chunks whose TSN is
1545 				 * older than that newly acknowledged DATA
1546 				 * chunk, are qualified as 'Stray DATA chunks'.
1547 				 */
1548 				list_add_tail(lchunk, &tlist);
1549 			}
1550 		} else {
1551 			if (tchunk->tsn_gap_acked) {
1552 				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1553 					 __func__, tsn);
1554 
1555 				tchunk->tsn_gap_acked = 0;
1556 
1557 				if (tchunk->transport)
1558 					bytes_acked -= sctp_data_size(tchunk);
1559 
1560 				/* RFC 2960 6.3.2 Retransmission Timer Rules
1561 				 *
1562 				 * R4) Whenever a SACK is received missing a
1563 				 * TSN that was previously acknowledged via a
1564 				 * Gap Ack Block, start T3-rtx for the
1565 				 * destination address to which the DATA
1566 				 * chunk was originally
1567 				 * transmitted if it is not already running.
1568 				 */
1569 				restart_timer = 1;
1570 			}
1571 
1572 			list_add_tail(lchunk, &tlist);
1573 		}
1574 	}
1575 
1576 	if (transport) {
1577 		if (bytes_acked) {
1578 			struct sctp_association *asoc = transport->asoc;
1579 
1580 			/* We may have counted DATA that was migrated
1581 			 * to this transport due to DEL-IP operation.
1582 			 * Subtract those bytes, since they were never
1583 			 * sent on this transport and shouldn't be
1584 			 * credited to this transport.
1585 			 */
1586 			bytes_acked -= migrate_bytes;
1587 
1588 			/* 8.2. When an outstanding TSN is acknowledged,
1589 			 * the endpoint shall clear the error counter of
1590 			 * the destination transport address to which the
1591 			 * DATA chunk was last sent.
1592 			 * The association's overall error counter is
1593 			 * also cleared.
1594 			 */
1595 			transport->error_count = 0;
1596 			transport->asoc->overall_error_count = 0;
1597 			forward_progress = true;
1598 
1599 			/*
1600 			 * While in SHUTDOWN PENDING, we may have started
1601 			 * the T5 shutdown guard timer after reaching the
1602 			 * retransmission limit. Stop that timer as soon
1603 			 * as the receiver acknowledged any data.
1604 			 */
1605 			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1606 			    del_timer(&asoc->timers
1607 				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1608 					sctp_association_put(asoc);
1609 
1610 			/* Mark the destination transport address as
1611 			 * active if it is not so marked.
1612 			 */
1613 			if ((transport->state == SCTP_INACTIVE ||
1614 			     transport->state == SCTP_UNCONFIRMED) &&
1615 			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1616 				sctp_assoc_control_transport(
1617 					transport->asoc,
1618 					transport,
1619 					SCTP_TRANSPORT_UP,
1620 					SCTP_RECEIVED_SACK);
1621 			}
1622 
1623 			sctp_transport_raise_cwnd(transport, sack_ctsn,
1624 						  bytes_acked);
1625 
1626 			transport->flight_size -= bytes_acked;
1627 			if (transport->flight_size == 0)
1628 				transport->partial_bytes_acked = 0;
1629 			q->outstanding_bytes -= bytes_acked + migrate_bytes;
1630 		} else {
1631 			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
1632 			 * When a sender is doing zero window probing, it
1633 			 * should not timeout the association if it continues
1634 			 * to receive new packets from the receiver. The
1635 			 * reason is that the receiver MAY keep its window
1636 			 * closed for an indefinite time.
1637 			 * A sender is doing zero window probing when the
1638 			 * receiver's advertised window is zero, and there is
1639 			 * only one data chunk in flight to the receiver.
1640 			 *
1641 			 * Allow the association to timeout while in SHUTDOWN
1642 			 * PENDING or SHUTDOWN RECEIVED in case the receiver
1643 			 * stays in zero window mode forever.
1644 			 */
1645 			if (!q->asoc->peer.rwnd &&
1646 			    !list_empty(&tlist) &&
1647 			    (sack_ctsn+2 == q->asoc->next_tsn) &&
1648 			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1649 				pr_debug("%s: sack received for zero window "
1650 					 "probe:%u\n", __func__, sack_ctsn);
1651 
1652 				q->asoc->overall_error_count = 0;
1653 				transport->error_count = 0;
1654 			}
1655 		}
1656 
1657 		/* RFC 2960 6.3.2 Retransmission Timer Rules
1658 		 *
1659 		 * R2) Whenever all outstanding data sent to an address have
1660 		 * been acknowledged, turn off the T3-rtx timer of that
1661 		 * address.
1662 		 */
1663 		if (!transport->flight_size) {
1664 			if (del_timer(&transport->T3_rtx_timer))
1665 				sctp_transport_put(transport);
1666 		} else if (restart_timer) {
1667 			if (!mod_timer(&transport->T3_rtx_timer,
1668 				       jiffies + transport->rto))
1669 				sctp_transport_hold(transport);
1670 		}
1671 
1672 		if (forward_progress) {
1673 			if (transport->dst)
1674 				dst_confirm(transport->dst);
1675 		}
1676 	}
1677 
1678 	list_splice(&tlist, transmitted_queue);
1679 }
1680 
1681 /* Mark chunks as missing; as a consequence they may get retransmitted. */
1682 static void sctp_mark_missing(struct sctp_outq *q,
1683 			      struct list_head *transmitted_queue,
1684 			      struct sctp_transport *transport,
1685 			      __u32 highest_new_tsn_in_sack,
1686 			      int count_of_newacks)
1687 {
1688 	struct sctp_chunk *chunk;
1689 	__u32 tsn;
1690 	char do_fast_retransmit = 0;
1691 	struct sctp_association *asoc = q->asoc;
1692 	struct sctp_transport *primary = asoc->peer.primary_path;
1693 
1694 	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1695 
1696 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1697 
1698 		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1699 		 * 'Unacknowledged TSN's', if the TSN number of an
1700 		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1701 		 * value, increment the 'TSN.Missing.Report' count on that
1702 		 * chunk if it has NOT been fast retransmitted or marked for
1703 		 * fast retransmit already.
1704 		 */
1705 		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1706 		    !chunk->tsn_gap_acked &&
1707 		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1708 
1709 			/* SFR-CACC (Split Fast Retransmit - Changeover Aware
1710 			 * Congestion Control) may require us to skip marking
1711 			 * this chunk as missing. */
1712 			if (!transport || !sctp_cacc_skip(primary,
1713 						chunk->transport,
1714 						count_of_newacks, tsn)) {
1715 				chunk->tsn_missing_report++;
1716 
1717 				pr_debug("%s: tsn:0x%x missing counter:%d\n",
1718 					 __func__, tsn, chunk->tsn_missing_report);
1719 			}
1720 		}
1721 		/*
1722 		 * M4) If any DATA chunk is found to have a
1723 		 * 'TSN.Missing.Report' value larger than or equal to 3,
1724 		 * mark that chunk for retransmission and start the fast
1725 		 * retransmit procedure.
1726 		 */
1727 
1728 		if (chunk->tsn_missing_report >= 3) {
1729 			chunk->fast_retransmit = SCTP_NEED_FRTX;
1730 			do_fast_retransmit = 1;
1731 		}
1732 	}
1733 
1734 	if (transport) {
1735 		if (do_fast_retransmit)
1736 			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1737 
1738 		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1739 			 "flight_size:%d, pba:%d\n",  __func__, transport,
1740 			 transport->cwnd, transport->ssthresh,
1741 			 transport->flight_size, transport->partial_bytes_acked);
1742 	}
1743 }
1744 
1745 /* Is the given TSN acked by this packet?  */
1746 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1747 {
1748 	int i;
1749 	sctp_sack_variable_t *frags;
1750 	__u16 gap;
1751 	__u32 ctsn = ntohl(sack->cum_tsn_ack);
1752 
1753 	if (TSN_lte(tsn, ctsn))
1754 		goto pass;
1755 
1756 	/* 3.3.4 Selective Acknowledgement (SACK) (3):
1757 	 *
1758 	 * Gap Ack Blocks:
1759 	 *  These fields contain the Gap Ack Blocks. They are repeated
1760 	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
1761 	 *  defined in the Number of Gap Ack Blocks field. All DATA
1762 	 *  chunks with TSNs greater than or equal to (Cumulative TSN
1763 	 *  Ack + Gap Ack Block Start) and less than or equal to
1764 	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1765 	 *  Block are assumed to have been received correctly.
1766 	 */
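	/* Worked example: with ctsn == 100 and a single Gap Ack Block
	 * {start: 2, end: 3}, TSNs 102 and 103 are covered.  For
	 * tsn == 103 the gap computed below is 3, which lies within
	 * [2, 3], so the TSN is considered acked.
	 */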
1767 
1768 	frags = sack->variable;
1769 	gap = tsn - ctsn;
1770 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
1771 		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
1772 		    TSN_lte(gap, ntohs(frags[i].gab.end)))
1773 			goto pass;
1774 	}
1775 
1776 	return 0;
1777 pass:
1778 	return 1;
1779 }
1780 
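/* Return the index of @stream within @skiplist, or @nskips (the next
 * free slot) if the stream has no entry yet.
 */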
1781 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1782 				    int nskips, __be16 stream)
1783 {
1784 	int i;
1785 
1786 	for (i = 0; i < nskips; i++) {
1787 		if (skiplist[i].stream == stream)
1788 			return i;
1789 	}
1790 	return i;
1791 }
1792 
1793 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1794 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1795 {
1796 	struct sctp_association *asoc = q->asoc;
1797 	struct sctp_chunk *ftsn_chunk = NULL;
1798 	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1799 	int nskips = 0;
1800 	int skip_pos = 0;
1801 	__u32 tsn;
1802 	struct sctp_chunk *chunk;
1803 	struct list_head *lchunk, *temp;
1804 
1805 	if (!asoc->peer.prsctp_capable)
1806 		return;
1807 
1808 	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1809 	 * received SACK.
1810 	 *
1811 	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1812 	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1813 	 */
1814 	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1815 		asoc->adv_peer_ack_point = ctsn;
1816 
1817 	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1818 	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1819 	 * the chunk next in the out-queue space is marked as "abandoned" as
1820 	 * shown in the following example:
1821 	 *
1822 	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1823 	 * and the Advanced.Peer.Ack.Point is updated to this value:
1824 	 *
1825 	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1826 	 *   normal SACK processing           local advancement
1827 	 *                ...                           ...
1828 	 *   Adv.Ack.Pt-> 102 acked                     102 acked
1829 	 *                103 abandoned                 103 abandoned
1830 	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
1831 	 *                105                           105
1832 	 *                106 acked                     106 acked
1833 	 *                ...                           ...
1834 	 *
1835 	 * In this example, the data sender successfully advanced the
1836 	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1837 	 */
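	/* The abandoned list is expected to be sorted by TSN, so the
	 * walk below can stop at the first chunk that is neither acked
	 * by the ctsn nor contiguous with the advanced ack point, and
	 * it records at most 10 skip entries (the size of
	 * ftsn_skip_arr).
	 */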
1838 	list_for_each_safe(lchunk, temp, &q->abandoned) {
1839 		chunk = list_entry(lchunk, struct sctp_chunk,
1840 					transmitted_list);
1841 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1842 
1843 		/* Remove any chunks in the abandoned queue that are acked by
1844 		 * the ctsn.
1845 		 */
1846 		if (TSN_lte(tsn, ctsn)) {
1847 			list_del_init(lchunk);
1848 			sctp_chunk_free(chunk);
1849 		} else {
1850 			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
1851 				asoc->adv_peer_ack_point = tsn;
1852 				if (chunk->chunk_hdr->flags &
1853 					 SCTP_DATA_UNORDERED)
1854 					continue;
1855 				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1856 						nskips,
1857 						chunk->subh.data_hdr->stream);
1858 				ftsn_skip_arr[skip_pos].stream =
1859 					chunk->subh.data_hdr->stream;
1860 				ftsn_skip_arr[skip_pos].ssn =
1861 					 chunk->subh.data_hdr->ssn;
1862 				if (skip_pos == nskips)
1863 					nskips++;
1864 				if (nskips == 10)
1865 					break;
1866 			} else
1867 				break;
1868 		}
1869 	}
1870 
1871 	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1872 	 * is greater than the Cumulative TSN ACK carried in the received
1873 	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1874 	 * chunk containing the latest value of the
1875 	 * "Advanced.Peer.Ack.Point".
1876 	 *
1877 	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1878 	 * list each stream and sequence number in the forwarded TSN. This
1879 	 * information will enable the receiver to easily find any
1880 	 * stranded TSN's waiting on stream reorder queues. Each stream
1881 	 * SHOULD only be reported once; this means that if multiple
1882 	 * abandoned messages occur in the same stream then only the
1883 	 * highest abandoned stream sequence number is reported. If the
1884 	 * total size of the FORWARD TSN does NOT fit in a single MTU then
1885 	 * the sender of the FORWARD TSN SHOULD lower the
1886 	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1887 	 * single MTU.
1888 	 */
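	/* Rather than computing an exact MTU fit, this implementation
	 * bounds the skip list at the 10 entries of ftsn_skip_arr,
	 * which keeps the resulting FORWARD TSN chunk small.
	 */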
1889 	if (asoc->adv_peer_ack_point > ctsn)
1890 		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1891 					      nskips, &ftsn_skip_arr[0]);
1892 
1893 	if (ftsn_chunk) {
1894 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1895 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1896 	}
1897 }
1898