/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving (RFC 8260),
 * mostly covering the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

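/* Build an I-DATA chunk with room for 'len' bytes of payload but no
 * payload copied in yet.  The I-DATA-specific header (struct
 * sctp_idatahdr) is zeroed and only the stream id is filled here; the
 * MID, PPID and FSN fields are assigned later, once the whole message
 * has been fragmented (see sctp_chunk_assign_mid() below).
 */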
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

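/* Assign a Message Identifier (MID) and per-fragment FSNs to every
 * fragment of the message 'chunk' belongs to.  The first fragment
 * carries the PPID instead of an FSN, so the FSN counter only advances
 * on the later fragments.  The stream's MID counter is merely peeked
 * until the last fragment is seen, at which point it is consumed
 * (sctp_mid_next/sctp_mid_uo_next); ordered and unordered messages use
 * separate MID spaces.
 */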
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

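/* Validate that a (I-)DATA chunk is of the expected chunk type and,
 * when ordered, that its SSN/MID has not already fallen behind the
 * next value expected on its incoming stream; unordered chunks always
 * pass.
 */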
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

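/* Insert 'event' into the reassembly queue, which is kept sorted by
 * stream id, then MID, then FSN (a first fragment sorts ahead of the
 * later fragments of the same message).  The two early checks handle
 * the common in-order arrival by appending at the tail without
 * walking the whole queue.
 */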
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

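/* Try to extend an in-progress partial delivery: starting from the
 * stream's current (mid, fsn) position, collect the run of
 * consecutive middle fragments (and a closing last fragment, if
 * present) and hand them up as one reassembled event.  Returns NULL
 * when the next expected fragment has not arrived yet.
 */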
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

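/* Look for a complete first..last fragment run for event's MID and
 * deliver it.  While scanning, also track a partial-delivery
 * candidate (pd_first/pd_last/pd_len) for the message the stream is
 * currently expecting; if no complete message is found but the
 * accumulated length has reached the socket's partial delivery point,
 * deliver that prefix and switch the stream into pd_mode.
 */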
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

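/* Reassembly entry point for ordered I-DATA: unfragmented messages
 * pass through directly with MSG_EOR set, fragments are queued and
 * then either continue an in-progress partial delivery or trigger a
 * full reassembly attempt.
 */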
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

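/* Queue an ordered event that arrived ahead of its turn into the
 * lobby, kept sorted by stream id and MID, so it can be released in
 * order once the missing earlier messages show up.
 */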
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream  = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream  = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

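/* Splice a list of events onto the socket receive queue and wake the
 * reader, honouring shutdown state and event subscription.  Returns 1
 * if something was queued, 0 if the events were freed instead.
 */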
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

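/* The *_uo helpers below mirror the ordered reassembly path for
 * unordered I-DATA.  Unordered messages bypass the lobby entirely but
 * keep per-stream reassembly state of their own (mid_uo, fsn_uo,
 * pd_mode_uo), since an unordered partial delivery can be in progress
 * at the same time as an ordered one.
 */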
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

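/* Forced partial delivery for unordered messages: find the longest
 * in-sequence run of fragments that starts with a first fragment on a
 * stream not already in pd_mode_uo, and deliver it as a partial
 * message.
 */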
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

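/* Main receive-path handler for an I-DATA chunk: turn it into an
 * ulpevent, pull the MID and PPID/FSN out of the I-DATA header, run
 * it through the ordered or unordered reassembly machinery, and
 * enqueue whatever becomes deliverable.  Returns 1 if a complete
 * message (MSG_EOR) reached the socket, 0 otherwise, or -ENOMEM.
 */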
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

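/* Ordered counterpart of sctp_intl_retrieve_first_uo(): only a run
 * whose first fragment carries the MID the stream is currently
 * waiting for may be partially delivered, so ordering is preserved.
 */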
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

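/* Start partial delivery (typically when the receive buffer is under
 * pressure) by draining every stream that has a deliverable fragment
 * run, first from the ordered and then from the unordered reassembly
 * queue.
 */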
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}

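/* Renege: if the receive queue is empty, free enough queued-but-
 * undelivered events (lobby first, then the reassembly queues) to
 * make room for 'chunk', then process it; if that still does not
 * complete a message, fall back to partial delivery.
 */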
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

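/* Send a SCTP_PARTIAL_DELIVERY_ABORTED notification for one stream,
 * if the user subscribed to partial delivery events, and wake the
 * reader directly since this bypasses the normal enqueue path.
 */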
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

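/* After a MID skip, release from the lobby every ordered event on
 * 'sid' whose MID is now behind the next expected one, plus (via
 * sctp_intl_retrieve_ordered()) any events they unblock, and deliver
 * them in one batch.
 */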
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

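/* Abort every in-progress partial delivery (both ordered and
 * unordered) on all incoming streams, advance the ordered MID past
 * the aborted message, and flush whatever is left queued.
 */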
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

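/* Find an existing (stream, flags) entry in the skip list being built
 * for an I-FORWARD-TSN, or return nskips to indicate a new slot.
 */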
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

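/* Build an I-FORWARD-TSN chunk: advance the advanced-peer-ack point
 * over abandoned chunks, recording one skip entry per (stream, U-bit)
 * pair (at most 10), and queue the chunk on the control chunk list if
 * the ack point actually moved.
 */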
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

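/* Iterate over the skip entries of an I-FORWARD-TSN chunk; the bound
 * is derived from the chunk length minus the fixed chunk header.
 */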
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

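/* Apply one I-FORWARD-TSN skip entry: for the U bit, just abort an
 * unordered partial delivery that the skip overtakes; for ordered
 * entries, abort any partial delivery, advance the stream's expected
 * MID past 'mid' and reap newly deliverable events from the lobby.
 */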
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream  = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

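/* Everything below wires the two implementations up: the do_*
 * wrappers adapt single-event callers to the skb-list based queueing
 * functions, and the two ops tables let an association switch between
 * the classic DATA/FORWARD-TSN path and the interleaving
 * I-DATA/I-FORWARD-TSN path.
 */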
static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= do_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= do_sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}