xref: /openbmc/linux/net/sctp/stream_interleave.c (revision 45cc842d5b75ba8f9a958f2dd12b95c6dd0452bd)
1 /* SCTP kernel implementation
2  * (C) Copyright Red Hat Inc. 2017
3  *
4  * This file is part of the SCTP kernel implementation
5  *
6  * These functions implement sctp stream message interleaving (I-DATA and I-FORWARD-TSN processing).
7  *
8  * This SCTP implementation is free software;
9  * you can redistribute it and/or modify it under the terms of
10  * the GNU General Public License as published by
11  * the Free Software Foundation; either version 2, or (at your option)
12  * any later version.
13  *
14  * This SCTP implementation is distributed in the hope that it
15  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
16  *                 ************************
17  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
18  * See the GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with GNU CC; see the file COPYING.  If not, see
22  * <http://www.gnu.org/licenses/>.
23  *
24  * Please send any bug reports or fixes you make to the
25  * email address(es):
26  *    lksctp developers <linux-sctp@vger.kernel.org>
27  *
28  * Written or modified by:
29  *    Xin Long <lucien.xin@gmail.com>
30  */
31 
32 #include <net/busy_poll.h>
33 #include <net/sctp/sctp.h>
34 #include <net/sctp/sm.h>
35 #include <net/sctp/ulpevent.h>
36 #include <linux/sctp.h>
37 
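/* Build an I-DATA fragment carrying only the fixed sctp_idatahdr; the caller
 * appends the actual payload afterwards.  The stream id is taken from sinfo,
 * and SCTP_DATA_UNORDERED is set when the sender asked for unordered
 * delivery.
 */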
38 static struct sctp_chunk *sctp_make_idatafrag_empty(
39 					const struct sctp_association *asoc,
40 					const struct sctp_sndrcvinfo *sinfo,
41 					int len, __u8 flags, gfp_t gfp)
42 {
43 	struct sctp_chunk *retval;
44 	struct sctp_idatahdr dp;
45 
46 	memset(&dp, 0, sizeof(dp));
47 	dp.stream = htons(sinfo->sinfo_stream);
48 
49 	if (sinfo->sinfo_flags & SCTP_UNORDERED)
50 		flags |= SCTP_DATA_UNORDERED;
51 
52 	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
53 	if (!retval)
54 		return NULL;
55 
56 	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
57 	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
58 
59 	return retval;
60 }
61 
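/* Assign a Message Identifier (MID) to every fragment of the message this
 * chunk belongs to.  The first fragment carries the PPID while later
 * fragments get consecutive Fragment Sequence Numbers (FSNs); the per-stream
 * MID counter is only advanced once the last fragment is seen, with ordered
 * and unordered messages using separate counters.
 */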
62 static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
63 {
64 	struct sctp_stream *stream;
65 	struct sctp_chunk *lchunk;
66 	__u32 cfsn = 0;
67 	__u16 sid;
68 
69 	if (chunk->has_mid)
70 		return;
71 
72 	sid = sctp_chunk_stream_no(chunk);
73 	stream = &chunk->asoc->stream;
74 
75 	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
76 		struct sctp_idatahdr *hdr;
77 		__u32 mid;
78 
79 		lchunk->has_mid = 1;
80 
81 		hdr = lchunk->subh.idata_hdr;
82 
83 		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
84 			hdr->ppid = lchunk->sinfo.sinfo_ppid;
85 		else
86 			hdr->fsn = htonl(cfsn++);
87 
88 		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
89 			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
90 				sctp_mid_uo_next(stream, out, sid) :
91 				sctp_mid_uo_peek(stream, out, sid);
92 		} else {
93 			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
94 				sctp_mid_next(stream, out, sid) :
95 				sctp_mid_peek(stream, out, sid);
96 		}
97 		hdr->mid = htonl(mid);
98 	}
99 }
100 
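/* A DATA chunk is acceptable if it is unordered or its SSN has not already
 * been passed on the receiving stream.
 */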
101 static bool sctp_validate_data(struct sctp_chunk *chunk)
102 {
103 	const struct sctp_stream *stream;
104 	__u16 sid, ssn;
105 
106 	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
107 		return false;
108 
109 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
110 		return true;
111 
112 	stream = &chunk->asoc->stream;
113 	sid = sctp_chunk_stream_no(chunk);
114 	ssn = ntohs(chunk->subh.data_hdr->ssn);
115 
116 	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
117 }
118 
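/* Same acceptance check as sctp_validate_data(), but for I-DATA chunks,
 * comparing the 32-bit MID instead of the 16-bit SSN.
 */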
119 static bool sctp_validate_idata(struct sctp_chunk *chunk)
120 {
121 	struct sctp_stream *stream;
122 	__u32 mid;
123 	__u16 sid;
124 
125 	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
126 		return false;
127 
128 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
129 		return true;
130 
131 	stream = &chunk->asoc->stream;
132 	sid = sctp_chunk_stream_no(chunk);
133 	mid = ntohl(chunk->subh.idata_hdr->mid);
134 
135 	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
136 }
137 
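/* Insert an event into the reassembly queue, which is kept sorted by stream
 * id, then MID, then fragment position (first fragment ahead of the others,
 * the rest by FSN).  In-order arrival is the common case and is handled by
 * appending at the tail without walking the queue.
 */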
138 static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
139 				  struct sctp_ulpevent *event)
140 {
141 	struct sctp_ulpevent *cevent;
142 	struct sk_buff *pos;
143 
144 	pos = skb_peek_tail(&ulpq->reasm);
145 	if (!pos) {
146 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
147 		return;
148 	}
149 
150 	cevent = sctp_skb2event(pos);
151 
152 	if (event->stream == cevent->stream &&
153 	    event->mid == cevent->mid &&
154 	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
155 	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
156 	      event->fsn > cevent->fsn))) {
157 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
158 		return;
159 	}
160 
161 	if ((event->stream == cevent->stream &&
162 	     MID_lt(cevent->mid, event->mid)) ||
163 	    event->stream > cevent->stream) {
164 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
165 		return;
166 	}
167 
168 	skb_queue_walk(&ulpq->reasm, pos) {
169 		cevent = sctp_skb2event(pos);
170 
171 		if (event->stream < cevent->stream ||
172 		    (event->stream == cevent->stream &&
173 		     MID_lt(event->mid, cevent->mid)))
174 			break;
175 
176 		if (event->stream == cevent->stream &&
177 		    event->mid == cevent->mid &&
178 		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
179 		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
180 		     event->fsn < cevent->fsn))
181 			break;
182 	}
183 
184 	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
185 }
186 
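/* Continue partial delivery on a stream: collect the run of middle/last
 * fragments whose FSNs follow on from what has already been delivered for
 * sin->mid, and hand them up as one event.  Partial delivery mode ends once
 * the last fragment has been consumed.
 */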
187 static struct sctp_ulpevent *sctp_intl_retrieve_partial(
188 						struct sctp_ulpq *ulpq,
189 						struct sctp_ulpevent *event)
190 {
191 	struct sk_buff *first_frag = NULL;
192 	struct sk_buff *last_frag = NULL;
193 	struct sctp_ulpevent *retval;
194 	struct sctp_stream_in *sin;
195 	struct sk_buff *pos;
196 	__u32 next_fsn = 0;
197 	int is_last = 0;
198 
199 	sin = sctp_stream_in(ulpq->asoc, event->stream);
200 
201 	skb_queue_walk(&ulpq->reasm, pos) {
202 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
203 
204 		if (cevent->stream < event->stream)
205 			continue;
206 
207 		if (cevent->stream > event->stream ||
208 		    cevent->mid != sin->mid)
209 			break;
210 
211 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
212 		case SCTP_DATA_FIRST_FRAG:
213 			goto out;
214 		case SCTP_DATA_MIDDLE_FRAG:
215 			if (!first_frag) {
216 				if (cevent->fsn == sin->fsn) {
217 					first_frag = pos;
218 					last_frag = pos;
219 					next_fsn = cevent->fsn + 1;
220 				}
221 			} else if (cevent->fsn == next_fsn) {
222 				last_frag = pos;
223 				next_fsn++;
224 			} else {
225 				goto out;
226 			}
227 			break;
228 		case SCTP_DATA_LAST_FRAG:
229 			if (!first_frag) {
230 				if (cevent->fsn == sin->fsn) {
231 					first_frag = pos;
232 					last_frag = pos;
233 					next_fsn = 0;
234 					is_last = 1;
235 				}
236 			} else if (cevent->fsn == next_fsn) {
237 				last_frag = pos;
238 				next_fsn = 0;
239 				is_last = 1;
240 			}
241 			goto out;
242 		default:
243 			goto out;
244 		}
245 	}
246 
247 out:
248 	if (!first_frag)
249 		return NULL;
250 
251 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
252 					     &ulpq->reasm, first_frag,
253 					     last_frag);
254 	if (retval) {
255 		sin->fsn = next_fsn;
256 		if (is_last) {
257 			retval->msg_flags |= MSG_EOR;
258 			sin->pd_mode = 0;
259 		}
260 	}
261 
262 	return retval;
263 }
264 
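/* Try to reassemble a complete message (first through last fragment with
 * consecutive FSNs) for the event's stream and MID.  If nothing complete is
 * available but the contiguous head of the next expected message already
 * exceeds the socket's partial delivery point, start partial delivery on it
 * instead.
 */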
265 static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
266 						struct sctp_ulpq *ulpq,
267 						struct sctp_ulpevent *event)
268 {
269 	struct sctp_association *asoc = ulpq->asoc;
270 	struct sk_buff *pos, *first_frag = NULL;
271 	struct sctp_ulpevent *retval = NULL;
272 	struct sk_buff *pd_first = NULL;
273 	struct sk_buff *pd_last = NULL;
274 	struct sctp_stream_in *sin;
275 	__u32 next_fsn = 0;
276 	__u32 pd_point = 0;
277 	__u32 pd_len = 0;
278 	__u32 mid = 0;
279 
280 	sin = sctp_stream_in(ulpq->asoc, event->stream);
281 
282 	skb_queue_walk(&ulpq->reasm, pos) {
283 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
284 
285 		if (cevent->stream < event->stream)
286 			continue;
287 		if (cevent->stream > event->stream)
288 			break;
289 
290 		if (MID_lt(cevent->mid, event->mid))
291 			continue;
292 		if (MID_lt(event->mid, cevent->mid))
293 			break;
294 
295 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
296 		case SCTP_DATA_FIRST_FRAG:
297 			if (cevent->mid == sin->mid) {
298 				pd_first = pos;
299 				pd_last = pos;
300 				pd_len = pos->len;
301 			}
302 
303 			first_frag = pos;
304 			next_fsn = 0;
305 			mid = cevent->mid;
306 			break;
307 
308 		case SCTP_DATA_MIDDLE_FRAG:
309 			if (first_frag && cevent->mid == mid &&
310 			    cevent->fsn == next_fsn) {
311 				next_fsn++;
312 				if (pd_first) {
313 					pd_last = pos;
314 					pd_len += pos->len;
315 				}
316 			} else {
317 				first_frag = NULL;
318 			}
319 			break;
320 
321 		case SCTP_DATA_LAST_FRAG:
322 			if (first_frag && cevent->mid == mid &&
323 			    cevent->fsn == next_fsn)
324 				goto found;
325 			else
326 				first_frag = NULL;
327 			break;
328 		}
329 	}
330 
331 	if (!pd_first)
332 		goto out;
333 
334 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
335 	if (pd_point && pd_point <= pd_len) {
336 		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
337 						     &ulpq->reasm,
338 						     pd_first, pd_last);
339 		if (retval) {
340 			sin->fsn = next_fsn;
341 			sin->pd_mode = 1;
342 		}
343 	}
344 	goto out;
345 
346 found:
347 	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
348 					     &ulpq->reasm,
349 					     first_frag, pos);
350 	if (retval)
351 		retval->msg_flags |= MSG_EOR;
352 
353 out:
354 	return retval;
355 }
356 
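/* Reassembly entry point for ordered I-DATA: unfragmented chunks pass
 * straight through, fragments are queued and then either continue an ongoing
 * partial delivery or trigger a reassembly attempt.
 */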
357 static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
358 					     struct sctp_ulpevent *event)
359 {
360 	struct sctp_ulpevent *retval = NULL;
361 	struct sctp_stream_in *sin;
362 
363 	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
364 		event->msg_flags |= MSG_EOR;
365 		return event;
366 	}
367 
368 	sctp_intl_store_reasm(ulpq, event);
369 
370 	sin = sctp_stream_in(ulpq->asoc, event->stream);
371 	if (sin->pd_mode && event->mid == sin->mid &&
372 	    event->fsn == sin->fsn)
373 		retval = sctp_intl_retrieve_partial(ulpq, event);
374 
375 	if (!retval)
376 		retval = sctp_intl_retrieve_reassembled(ulpq, event);
377 
378 	return retval;
379 }
380 
381 static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
382 				    struct sctp_ulpevent *event)
383 {
384 	struct sctp_ulpevent *cevent;
385 	struct sk_buff *pos;
386 
387 	pos = skb_peek_tail(&ulpq->lobby);
388 	if (!pos) {
389 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
390 		return;
391 	}
392 
393 	cevent = (struct sctp_ulpevent *)pos->cb;
394 	if (event->stream == cevent->stream &&
395 	    MID_lt(cevent->mid, event->mid)) {
396 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
397 		return;
398 	}
399 
400 	if (event->stream > cevent->stream) {
401 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
402 		return;
403 	}
404 
405 	skb_queue_walk(&ulpq->lobby, pos) {
406 		cevent = (struct sctp_ulpevent *)pos->cb;
407 
408 		if (cevent->stream > event->stream)
409 			break;
410 
411 		if (cevent->stream == event->stream &&
412 		    MID_lt(event->mid, cevent->mid))
413 			break;
414 	}
415 
416 	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
417 }
418 
419 static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
420 				       struct sctp_ulpevent *event)
421 {
422 	struct sk_buff_head *event_list;
423 	struct sctp_stream *stream;
424 	struct sk_buff *pos, *tmp;
425 	__u16 sid = event->stream;
426 
427 	stream  = &ulpq->asoc->stream;
428 	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
429 
430 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
431 		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
432 
433 		if (cevent->stream > sid)
434 			break;
435 
436 		if (cevent->stream < sid)
437 			continue;
438 
439 		if (cevent->mid != sctp_mid_peek(stream, in, sid))
440 			break;
441 
442 		sctp_mid_next(stream, in, sid);
443 
444 		__skb_unlink(pos, &ulpq->lobby);
445 
446 		__skb_queue_tail(event_list, pos);
447 	}
448 }
449 
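/* Ordered delivery: an event whose MID is not the next expected one for its
 * stream is parked in the lobby; otherwise the expected MID is advanced and
 * any consecutive messages already waiting in the lobby are pulled out
 * behind it.
 */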
450 static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
451 					     struct sctp_ulpevent *event)
452 {
453 	struct sctp_stream *stream;
454 	__u16 sid;
455 
456 	stream  = &ulpq->asoc->stream;
457 	sid = event->stream;
458 
459 	if (event->mid != sctp_mid_peek(stream, in, sid)) {
460 		sctp_intl_store_ordered(ulpq, event);
461 		return NULL;
462 	}
463 
464 	sctp_mid_next(stream, in, sid);
465 
466 	sctp_intl_retrieve_ordered(ulpq, event);
467 
468 	return event;
469 }
470 
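/* Deliver an event to the socket receive queue and wake the reader; skb->prev
 * is used as a pointer to the list the event sits on, so a whole list can be
 * spliced in at once.  The event is dropped instead when the socket is shut
 * down for receiving or its type is not subscribed to.
 */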
471 static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
472 			      struct sctp_ulpevent *event)
473 {
474 	struct sk_buff *skb = sctp_event2skb(event);
475 	struct sock *sk = ulpq->asoc->base.sk;
476 	struct sctp_sock *sp = sctp_sk(sk);
477 	struct sk_buff_head *skb_list;
478 
479 	skb_list = (struct sk_buff_head *)skb->prev;
480 
481 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
482 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
483 	     !sctp_ulpevent_is_notification(event)))
484 		goto out_free;
485 
486 	if (!sctp_ulpevent_is_notification(event)) {
487 		sk_mark_napi_id(sk, skb);
488 		sk_incoming_cpu_update(sk);
489 	}
490 
491 	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
492 		goto out_free;
493 
494 	if (skb_list)
495 		skb_queue_splice_tail_init(skb_list,
496 					   &sk->sk_receive_queue);
497 	else
498 		__skb_queue_tail(&sk->sk_receive_queue, skb);
499 
500 	if (!sp->data_ready_signalled) {
501 		sp->data_ready_signalled = 1;
502 		sk->sk_data_ready(sk);
503 	}
504 
505 	return 1;
506 
507 out_free:
508 	if (skb_list)
509 		sctp_queue_purge_ulpevents(skb_list);
510 	else
511 		sctp_ulpevent_free(event);
512 
513 	return 0;
514 }
515 
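/* The *_uo helpers below mirror the ordered reassembly path for unordered
 * I-DATA, using the per-stream mid_uo/fsn_uo/pd_mode_uo state and the
 * separate reasm_uo queue.
 */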
516 static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
517 				     struct sctp_ulpevent *event)
518 {
519 	struct sctp_ulpevent *cevent;
520 	struct sk_buff *pos;
521 
522 	pos = skb_peek_tail(&ulpq->reasm_uo);
523 	if (!pos) {
524 		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
525 		return;
526 	}
527 
528 	cevent = sctp_skb2event(pos);
529 
530 	if (event->stream == cevent->stream &&
531 	    event->mid == cevent->mid &&
532 	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
533 	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
534 	      event->fsn > cevent->fsn))) {
535 		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
536 		return;
537 	}
538 
539 	if ((event->stream == cevent->stream &&
540 	     MID_lt(cevent->mid, event->mid)) ||
541 	    event->stream > cevent->stream) {
542 		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
543 		return;
544 	}
545 
546 	skb_queue_walk(&ulpq->reasm_uo, pos) {
547 		cevent = sctp_skb2event(pos);
548 
549 		if (event->stream < cevent->stream ||
550 		    (event->stream == cevent->stream &&
551 		     MID_lt(event->mid, cevent->mid)))
552 			break;
553 
554 		if (event->stream == cevent->stream &&
555 		    event->mid == cevent->mid &&
556 		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
557 		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
558 		     event->fsn < cevent->fsn))
559 			break;
560 	}
561 
562 	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
563 }
564 
565 static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
566 						struct sctp_ulpq *ulpq,
567 						struct sctp_ulpevent *event)
568 {
569 	struct sk_buff *first_frag = NULL;
570 	struct sk_buff *last_frag = NULL;
571 	struct sctp_ulpevent *retval;
572 	struct sctp_stream_in *sin;
573 	struct sk_buff *pos;
574 	__u32 next_fsn = 0;
575 	int is_last = 0;
576 
577 	sin = sctp_stream_in(ulpq->asoc, event->stream);
578 
579 	skb_queue_walk(&ulpq->reasm_uo, pos) {
580 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
581 
582 		if (cevent->stream < event->stream)
583 			continue;
584 		if (cevent->stream > event->stream)
585 			break;
586 
587 		if (MID_lt(cevent->mid, sin->mid_uo))
588 			continue;
589 		if (MID_lt(sin->mid_uo, cevent->mid))
590 			break;
591 
592 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
593 		case SCTP_DATA_FIRST_FRAG:
594 			goto out;
595 		case SCTP_DATA_MIDDLE_FRAG:
596 			if (!first_frag) {
597 				if (cevent->fsn == sin->fsn_uo) {
598 					first_frag = pos;
599 					last_frag = pos;
600 					next_fsn = cevent->fsn + 1;
601 				}
602 			} else if (cevent->fsn == next_fsn) {
603 				last_frag = pos;
604 				next_fsn++;
605 			} else {
606 				goto out;
607 			}
608 			break;
609 		case SCTP_DATA_LAST_FRAG:
610 			if (!first_frag) {
611 				if (cevent->fsn == sin->fsn_uo) {
612 					first_frag = pos;
613 					last_frag = pos;
614 					next_fsn = 0;
615 					is_last = 1;
616 				}
617 			} else if (cevent->fsn == next_fsn) {
618 				last_frag = pos;
619 				next_fsn = 0;
620 				is_last = 1;
621 			}
622 			goto out;
623 		default:
624 			goto out;
625 		}
626 	}
627 
628 out:
629 	if (!first_frag)
630 		return NULL;
631 
632 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
633 					     &ulpq->reasm_uo, first_frag,
634 					     last_frag);
635 	if (retval) {
636 		sin->fsn_uo = next_fsn;
637 		if (is_last) {
638 			retval->msg_flags |= MSG_EOR;
639 			sin->pd_mode_uo = 0;
640 		}
641 	}
642 
643 	return retval;
644 }
645 
646 static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
647 						struct sctp_ulpq *ulpq,
648 						struct sctp_ulpevent *event)
649 {
650 	struct sctp_association *asoc = ulpq->asoc;
651 	struct sk_buff *pos, *first_frag = NULL;
652 	struct sctp_ulpevent *retval = NULL;
653 	struct sk_buff *pd_first = NULL;
654 	struct sk_buff *pd_last = NULL;
655 	struct sctp_stream_in *sin;
656 	__u32 next_fsn = 0;
657 	__u32 pd_point = 0;
658 	__u32 pd_len = 0;
659 	__u32 mid = 0;
660 
661 	sin = sctp_stream_in(ulpq->asoc, event->stream);
662 
663 	skb_queue_walk(&ulpq->reasm_uo, pos) {
664 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
665 
666 		if (cevent->stream < event->stream)
667 			continue;
668 		if (cevent->stream > event->stream)
669 			break;
670 
671 		if (MID_lt(cevent->mid, event->mid))
672 			continue;
673 		if (MID_lt(event->mid, cevent->mid))
674 			break;
675 
676 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
677 		case SCTP_DATA_FIRST_FRAG:
678 			if (!sin->pd_mode_uo) {
679 				sin->mid_uo = cevent->mid;
680 				pd_first = pos;
681 				pd_last = pos;
682 				pd_len = pos->len;
683 			}
684 
685 			first_frag = pos;
686 			next_fsn = 0;
687 			mid = cevent->mid;
688 			break;
689 
690 		case SCTP_DATA_MIDDLE_FRAG:
691 			if (first_frag && cevent->mid == mid &&
692 			    cevent->fsn == next_fsn) {
693 				next_fsn++;
694 				if (pd_first) {
695 					pd_last = pos;
696 					pd_len += pos->len;
697 				}
698 			} else {
699 				first_frag = NULL;
700 			}
701 			break;
702 
703 		case SCTP_DATA_LAST_FRAG:
704 			if (first_frag && cevent->mid == mid &&
705 			    cevent->fsn == next_fsn)
706 				goto found;
707 			else
708 				first_frag = NULL;
709 			break;
710 		}
711 	}
712 
713 	if (!pd_first)
714 		goto out;
715 
716 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
717 	if (pd_point && pd_point <= pd_len) {
718 		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
719 						     &ulpq->reasm_uo,
720 						     pd_first, pd_last);
721 		if (retval) {
722 			sin->fsn_uo = next_fsn;
723 			sin->pd_mode_uo = 1;
724 		}
725 	}
726 	goto out;
727 
728 found:
729 	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
730 					     &ulpq->reasm_uo,
731 					     first_frag, pos);
732 	if (retval)
733 		retval->msg_flags |= MSG_EOR;
734 
735 out:
736 	return retval;
737 }
738 
739 static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
740 						struct sctp_ulpevent *event)
741 {
742 	struct sctp_ulpevent *retval = NULL;
743 	struct sctp_stream_in *sin;
744 
745 	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
746 		event->msg_flags |= MSG_EOR;
747 		return event;
748 	}
749 
750 	sctp_intl_store_reasm_uo(ulpq, event);
751 
752 	sin = sctp_stream_in(ulpq->asoc, event->stream);
753 	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
754 	    event->fsn == sin->fsn_uo)
755 		retval = sctp_intl_retrieve_partial_uo(ulpq, event);
756 
757 	if (!retval)
758 		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
759 
760 	return retval;
761 }
762 
763 static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
764 {
765 	struct sctp_stream_in *csin, *sin = NULL;
766 	struct sk_buff *first_frag = NULL;
767 	struct sk_buff *last_frag = NULL;
768 	struct sctp_ulpevent *retval;
769 	struct sk_buff *pos;
770 	__u32 next_fsn = 0;
771 	__u16 sid = 0;
772 
773 	skb_queue_walk(&ulpq->reasm_uo, pos) {
774 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
775 
776 		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
777 		if (csin->pd_mode_uo)
778 			continue;
779 
780 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
781 		case SCTP_DATA_FIRST_FRAG:
782 			if (first_frag)
783 				goto out;
784 			first_frag = pos;
785 			last_frag = pos;
786 			next_fsn = 0;
787 			sin = csin;
788 			sid = cevent->stream;
789 			sin->mid_uo = cevent->mid;
790 			break;
791 		case SCTP_DATA_MIDDLE_FRAG:
792 			if (!first_frag)
793 				break;
794 			if (cevent->stream == sid &&
795 			    cevent->mid == sin->mid_uo &&
796 			    cevent->fsn == next_fsn) {
797 				next_fsn++;
798 				last_frag = pos;
799 			} else {
800 				goto out;
801 			}
802 			break;
803 		case SCTP_DATA_LAST_FRAG:
804 			if (first_frag)
805 				goto out;
806 			break;
807 		default:
808 			break;
809 		}
810 	}
811 
812 	if (!first_frag)
813 		return NULL;
814 
815 out:
816 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
817 					     &ulpq->reasm_uo, first_frag,
818 					     last_frag);
819 	if (retval) {
820 		sin->fsn_uo = next_fsn;
821 		sin->pd_mode_uo = 1;
822 	}
823 
824 	return retval;
825 }
826 
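/* Main receive path for an I-DATA chunk: build an event, pick up the MID and
 * either the PPID (first fragment) or the FSN from the I-DATA header,
 * reassemble, and for ordered data also enforce MID ordering before queueing
 * to the socket.  Returns 1 when a complete message (MSG_EOR) was delivered,
 * 0 otherwise, or -ENOMEM.
 */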
827 static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
828 			       struct sctp_chunk *chunk, gfp_t gfp)
829 {
830 	struct sctp_ulpevent *event;
831 	struct sk_buff_head temp;
832 	int event_eor = 0;
833 
834 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
835 	if (!event)
836 		return -ENOMEM;
837 
838 	event->mid = ntohl(chunk->subh.idata_hdr->mid);
839 	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
840 		event->ppid = chunk->subh.idata_hdr->ppid;
841 	else
842 		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
843 
844 	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
845 		event = sctp_intl_reasm(ulpq, event);
846 		if (event && event->msg_flags & MSG_EOR) {
847 			skb_queue_head_init(&temp);
848 			__skb_queue_tail(&temp, sctp_event2skb(event));
849 
850 			event = sctp_intl_order(ulpq, event);
851 		}
852 	} else {
853 		event = sctp_intl_reasm_uo(ulpq, event);
854 	}
855 
856 	if (event) {
857 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
858 		sctp_enqueue_event(ulpq, event);
859 	}
860 
861 	return event_eor;
862 }
863 
864 static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
865 {
866 	struct sctp_stream_in *csin, *sin = NULL;
867 	struct sk_buff *first_frag = NULL;
868 	struct sk_buff *last_frag = NULL;
869 	struct sctp_ulpevent *retval;
870 	struct sk_buff *pos;
871 	__u32 next_fsn = 0;
872 	__u16 sid = 0;
873 
874 	skb_queue_walk(&ulpq->reasm, pos) {
875 		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
876 
877 		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
878 		if (csin->pd_mode)
879 			continue;
880 
881 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
882 		case SCTP_DATA_FIRST_FRAG:
883 			if (first_frag)
884 				goto out;
885 			if (cevent->mid == csin->mid) {
886 				first_frag = pos;
887 				last_frag = pos;
888 				next_fsn = 0;
889 				sin = csin;
890 				sid = cevent->stream;
891 			}
892 			break;
893 		case SCTP_DATA_MIDDLE_FRAG:
894 			if (!first_frag)
895 				break;
896 			if (cevent->stream == sid &&
897 			    cevent->mid == sin->mid &&
898 			    cevent->fsn == next_fsn) {
899 				next_fsn++;
900 				last_frag = pos;
901 			} else {
902 				goto out;
903 			}
904 			break;
905 		case SCTP_DATA_LAST_FRAG:
906 			if (first_frag)
907 				goto out;
908 			break;
909 		default:
910 			break;
911 		}
912 	}
913 
914 	if (!first_frag)
915 		return NULL;
916 
917 out:
918 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
919 					     &ulpq->reasm, first_frag,
920 					     last_frag);
921 	if (retval) {
922 		sin->fsn = next_fsn;
923 		sin->pd_mode = 1;
924 	}
925 
926 	return retval;
927 }
928 
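/* Start partial delivery: repeatedly pull the leading run of fragments for a
 * stream not yet in partial delivery mode from the ordered and unordered
 * reassembly queues and push each run up to the socket.
 */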
929 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
930 {
931 	struct sctp_ulpevent *event;
932 
933 	if (!skb_queue_empty(&ulpq->reasm)) {
934 		do {
935 			event = sctp_intl_retrieve_first(ulpq);
936 			if (event)
937 				sctp_enqueue_event(ulpq, event);
938 		} while (event);
939 	}
940 
941 	if (!skb_queue_empty(&ulpq->reasm_uo)) {
942 		do {
943 			event = sctp_intl_retrieve_first_uo(ulpq);
944 			if (event)
945 				sctp_enqueue_event(ulpq, event);
946 		} while (event);
947 	}
948 }
949 
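/* Renege previously queued data to make room for a new chunk: free events
 * from the lobby and both reassembly queues until enough bytes are recovered,
 * then process the chunk and fall back to partial delivery if that did not
 * complete a message.
 */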
950 static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
951 			       gfp_t gfp)
952 {
953 	struct sctp_association *asoc = ulpq->asoc;
954 	__u32 freed = 0;
955 	__u16 needed;
956 
957 	if (chunk) {
958 		needed = ntohs(chunk->chunk_hdr->length);
959 		needed -= sizeof(struct sctp_idata_chunk);
960 	} else {
961 		needed = SCTP_DEFAULT_MAXWINDOW;
962 	}
963 
964 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
965 		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
966 		if (freed < needed)
967 			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
968 						       needed);
969 		if (freed < needed)
970 			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
971 						       needed);
972 	}
973 
974 	if (chunk && freed >= needed)
975 		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
976 			sctp_intl_start_pd(ulpq, gfp);
977 
978 	sk_mem_reclaim(asoc->base.sk);
979 }
980 
981 static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
982 				      __u32 mid, __u16 flags, gfp_t gfp)
983 {
984 	struct sock *sk = ulpq->asoc->base.sk;
985 	struct sctp_ulpevent *ev = NULL;
986 
987 	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
988 					&sctp_sk(sk)->subscribe))
989 		return;
990 
991 	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
992 				      sid, mid, flags, gfp);
993 	if (ev) {
994 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
995 
996 		if (!sctp_sk(sk)->data_ready_signalled) {
997 			sctp_sk(sk)->data_ready_signalled = 1;
998 			sk->sk_data_ready(sk);
999 		}
1000 	}
1001 }
1002 
1003 static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
1004 {
1005 	struct sctp_stream *stream = &ulpq->asoc->stream;
1006 	struct sctp_ulpevent *cevent, *event = NULL;
1007 	struct sk_buff_head *lobby = &ulpq->lobby;
1008 	struct sk_buff *pos, *tmp;
1009 	struct sk_buff_head temp;
1010 	__u16 csid;
1011 	__u32 cmid;
1012 
1013 	skb_queue_head_init(&temp);
1014 	sctp_skb_for_each(pos, lobby, tmp) {
1015 		cevent = (struct sctp_ulpevent *)pos->cb;
1016 		csid = cevent->stream;
1017 		cmid = cevent->mid;
1018 
1019 		if (csid > sid)
1020 			break;
1021 
1022 		if (csid < sid)
1023 			continue;
1024 
1025 		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
1026 			break;
1027 
1028 		__skb_unlink(pos, lobby);
1029 		if (!event)
1030 			event = sctp_skb2event(pos);
1031 
1032 		__skb_queue_tail(&temp, pos);
1033 	}
1034 
1035 	if (!event && pos != (struct sk_buff *)lobby) {
1036 		cevent = (struct sctp_ulpevent *)pos->cb;
1037 		csid = cevent->stream;
1038 		cmid = cevent->mid;
1039 
1040 		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
1041 			sctp_mid_next(stream, in, csid);
1042 			__skb_unlink(pos, lobby);
1043 			__skb_queue_tail(&temp, pos);
1044 			event = sctp_skb2event(pos);
1045 		}
1046 	}
1047 
1048 	if (event) {
1049 		sctp_intl_retrieve_ordered(ulpq, event);
1050 		sctp_enqueue_event(ulpq, event);
1051 	}
1052 }
1053 
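/* Abort partial delivery on every stream: send a PDAPI notification for any
 * stream stuck in (unordered or ordered) partial delivery, skip past the
 * abandoned MID, reap whatever became deliverable from the lobby, and finally
 * flush the queues.
 */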
1054 static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1055 {
1056 	struct sctp_stream *stream = &ulpq->asoc->stream;
1057 	__u16 sid;
1058 
1059 	for (sid = 0; sid < stream->incnt; sid++) {
1060 		struct sctp_stream_in *sin = &stream->in[sid];
1061 		__u32 mid;
1062 
1063 		if (sin->pd_mode_uo) {
1064 			sin->pd_mode_uo = 0;
1065 
1066 			mid = sin->mid_uo;
1067 			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
1068 		}
1069 
1070 		if (sin->pd_mode) {
1071 			sin->pd_mode = 0;
1072 
1073 			mid = sin->mid;
1074 			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
1075 			sctp_mid_skip(stream, in, sid, mid);
1076 
1077 			sctp_intl_reap_ordered(ulpq, sid);
1078 		}
1079 	}
1080 
1081 	/* Aborting interleaved partial delivery happens only when all pending data must be flushed. */
1082 	sctp_ulpq_flush(ulpq);
1083 }
1084 
1085 static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
1086 				    int nskips, __be16 stream, __u8 flags)
1087 {
1088 	int i;
1089 
1090 	for (i = 0; i < nskips; i++)
1091 		if (skiplist[i].stream == stream &&
1092 		    skiplist[i].flags == flags)
1093 			return i;
1094 
1095 	return i;
1096 }
1097 
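/* Build an I-FORWARD-TSN chunk: drop abandoned chunks already covered by the
 * cumulative TSN, advance the advertised peer ack point over the consecutive
 * abandoned TSNs that follow, and record one skip entry (stream, U bit, MID)
 * per stream/flag pair, at most 10 per chunk.
 */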
1098 #define SCTP_FTSN_U_BIT	0x1
1099 static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
1100 {
1101 	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
1102 	struct sctp_association *asoc = q->asoc;
1103 	struct sctp_chunk *ftsn_chunk = NULL;
1104 	struct list_head *lchunk, *temp;
1105 	int nskips = 0, skip_pos;
1106 	struct sctp_chunk *chunk;
1107 	__u32 tsn;
1108 
1109 	if (!asoc->peer.prsctp_capable)
1110 		return;
1111 
1112 	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1113 		asoc->adv_peer_ack_point = ctsn;
1114 
1115 	list_for_each_safe(lchunk, temp, &q->abandoned) {
1116 		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
1117 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1118 
1119 		if (TSN_lte(tsn, ctsn)) {
1120 			list_del_init(lchunk);
1121 			sctp_chunk_free(chunk);
1122 		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
1123 			__be16 sid = chunk->subh.idata_hdr->stream;
1124 			__be32 mid = chunk->subh.idata_hdr->mid;
1125 			__u8 flags = 0;
1126 
1127 			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1128 				flags |= SCTP_FTSN_U_BIT;
1129 
1130 			asoc->adv_peer_ack_point = tsn;
1131 			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
1132 						     sid, flags);
1133 			ftsn_skip_arr[skip_pos].stream = sid;
1134 			ftsn_skip_arr[skip_pos].reserved = 0;
1135 			ftsn_skip_arr[skip_pos].flags = flags;
1136 			ftsn_skip_arr[skip_pos].mid = mid;
1137 			if (skip_pos == nskips)
1138 				nskips++;
1139 			if (nskips == 10)
1140 				break;
1141 		} else {
1142 			break;
1143 		}
1144 	}
1145 
1146 	if (asoc->adv_peer_ack_point > ctsn)
1147 		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
1148 					       nskips, &ftsn_skip_arr[0]);
1149 
1150 	if (ftsn_chunk) {
1151 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1152 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1153 	}
1154 }
1155 
1156 #define _sctp_walk_ifwdtsn(pos, chunk, end) \
1157 	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
1158 	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
1159 
1160 #define sctp_walk_ifwdtsn(pos, ch) \
1161 	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
1162 					sizeof(struct sctp_ifwdtsn_chunk))
1163 
1164 static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
1165 {
1166 	struct sctp_fwdtsn_skip *skip;
1167 	__u16 incnt;
1168 
1169 	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
1170 		return false;
1171 
1172 	incnt = chunk->asoc->stream.incnt;
1173 	sctp_walk_fwdtsn(skip, chunk)
1174 		if (ntohs(skip->stream) >= incnt)
1175 			return false;
1176 
1177 	return true;
1178 }
1179 
1180 static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
1181 {
1182 	struct sctp_ifwdtsn_skip *skip;
1183 	__u16 incnt;
1184 
1185 	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
1186 		return false;
1187 
1188 	incnt = chunk->asoc->stream.incnt;
1189 	sctp_walk_ifwdtsn(skip, chunk)
1190 		if (ntohs(skip->stream) >= incnt)
1191 			return false;
1192 
1193 	return true;
1194 }
1195 
1196 static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1197 {
1198 	/* Move the Cumulative TSN Ack ahead. */
1199 	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1200 	/* Purge the fragmentation queue. */
1201 	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
1202 	/* Abort any in progress partial delivery. */
1203 	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
1204 }
1205 
1206 static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1207 {
1208 	struct sk_buff *pos, *tmp;
1209 
1210 	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
1211 		struct sctp_ulpevent *event = sctp_skb2event(pos);
1212 		__u32 tsn = event->tsn;
1213 
1214 		if (TSN_lte(tsn, ftsn)) {
1215 			__skb_unlink(pos, &ulpq->reasm);
1216 			sctp_ulpevent_free(event);
1217 		}
1218 	}
1219 
1220 	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
1221 		struct sctp_ulpevent *event = sctp_skb2event(pos);
1222 		__u32 tsn = event->tsn;
1223 
1224 		if (TSN_lte(tsn, ftsn)) {
1225 			__skb_unlink(pos, &ulpq->reasm_uo);
1226 			sctp_ulpevent_free(event);
1227 		}
1228 	}
1229 }
1230 
1231 static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1232 {
1233 	/* Move the Cumulative TSN Ack ahead. */
1234 	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1235 	/* Purge the fragmentation queue. */
1236 	sctp_intl_reasm_flushtsn(ulpq, ftsn);
1237 	/* Abort partial delivery only when the skip covers all data received so far. */
1238 	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
1239 		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
1240 }
1241 
1242 static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1243 {
1244 	struct sctp_fwdtsn_skip *skip;
1245 
1246 	/* Walk through all the skipped SSNs */
1247 	sctp_walk_fwdtsn(skip, chunk)
1248 		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
1249 }
1250 
1251 static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
1252 			   __u8 flags)
1253 {
1254 	struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
1255 	struct sctp_stream *stream  = &ulpq->asoc->stream;
1256 
1257 	if (flags & SCTP_FTSN_U_BIT) {
1258 		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
1259 			sin->pd_mode_uo = 0;
1260 			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
1261 						  GFP_ATOMIC);
1262 		}
1263 		return;
1264 	}
1265 
1266 	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
1267 		return;
1268 
1269 	if (sin->pd_mode) {
1270 		sin->pd_mode = 0;
1271 		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
1272 	}
1273 
1274 	sctp_mid_skip(stream, in, sid, mid);
1275 
1276 	sctp_intl_reap_ordered(ulpq, sid);
1277 }
1278 
1279 static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1280 {
1281 	struct sctp_ifwdtsn_skip *skip;
1282 
1283 	/* Walk through all the skipped MIDs and abort stream pd if possible */
1284 	sctp_walk_ifwdtsn(skip, chunk)
1285 		sctp_intl_skip(ulpq, ntohs(skip->stream),
1286 			       ntohl(skip->mid), skip->flags);
1287 }
1288 
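/* Two operation tables: sctp_stream_interleave_0 keeps the original
 * DATA/FORWARD-TSN behaviour, while sctp_stream_interleave_1 switches the
 * association to I-DATA and I-FORWARD-TSN processing (RFC 8260 user message
 * interleaving).
 */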
1289 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
1290 	.data_chunk_len		= sizeof(struct sctp_data_chunk),
1291 	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
1292 	/* DATA process functions */
1293 	.make_datafrag		= sctp_make_datafrag_empty,
1294 	.assign_number		= sctp_chunk_assign_ssn,
1295 	.validate_data		= sctp_validate_data,
1296 	.ulpevent_data		= sctp_ulpq_tail_data,
1297 	.enqueue_event		= sctp_ulpq_tail_event,
1298 	.renege_events		= sctp_ulpq_renege,
1299 	.start_pd		= sctp_ulpq_partial_delivery,
1300 	.abort_pd		= sctp_ulpq_abort_pd,
1301 	/* FORWARD-TSN process functions */
1302 	.generate_ftsn		= sctp_generate_fwdtsn,
1303 	.validate_ftsn		= sctp_validate_fwdtsn,
1304 	.report_ftsn		= sctp_report_fwdtsn,
1305 	.handle_ftsn		= sctp_handle_fwdtsn,
1306 };
1307 
1308 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
1309 	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
1310 	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
1311 	/* I-DATA process functions */
1312 	.make_datafrag		= sctp_make_idatafrag_empty,
1313 	.assign_number		= sctp_chunk_assign_mid,
1314 	.validate_data		= sctp_validate_idata,
1315 	.ulpevent_data		= sctp_ulpevent_idata,
1316 	.enqueue_event		= sctp_enqueue_event,
1317 	.renege_events		= sctp_renege_events,
1318 	.start_pd		= sctp_intl_start_pd,
1319 	.abort_pd		= sctp_intl_abort_pd,
1320 	/* I-FORWARD-TSN process functions */
1321 	.generate_ftsn		= sctp_generate_iftsn,
1322 	.validate_ftsn		= sctp_validate_iftsn,
1323 	.report_ftsn		= sctp_report_iftsn,
1324 	.handle_ftsn		= sctp_handle_iftsn,
1325 };
1326 
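/* Pick the operation table when the stream is set up; callers then invoke the
 * ops indirectly, for example asoc->stream.si->ulpevent_data(ulpq, chunk, gfp),
 * without caring whether DATA or I-DATA is in use.
 */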
1327 void sctp_stream_interleave_init(struct sctp_stream *stream)
1328 {
1329 	struct sctp_association *asoc;
1330 
1331 	asoc = container_of(stream, struct sctp_association, stream);
1332 	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
1333 				       : &sctp_stream_interleave_0;
1334 }
1335