xref: /openbmc/linux/net/sctp/ulpqueue.c (revision cb1aaebe)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 Nokia, Inc.
7  * Copyright (c) 2001 La Monte H.P. Yarroll
8  *
9  * This abstraction carries sctp events to the ULP (sockets).
10  *
11  * This SCTP implementation is free software;
12  * you can redistribute it and/or modify it under the terms of
13  * the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This SCTP implementation is distributed in the hope that it
18  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19  *                 ************************
20  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21  * See the GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with GNU CC; see the file COPYING.  If not, see
25  * <http://www.gnu.org/licenses/>.
26  *
27  * Please send any bug reports or fixes you make to the
28  * email address(es):
29  *    lksctp developers <linux-sctp@vger.kernel.org>
30  *
31  * Written or modified by:
32  *    Jon Grimm             <jgrimm@us.ibm.com>
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Sridhar Samudrala     <sri@us.ibm.com>
35  */
36 
37 #include <linux/slab.h>
38 #include <linux/types.h>
39 #include <linux/skbuff.h>
40 #include <net/sock.h>
41 #include <net/busy_poll.h>
42 #include <net/sctp/structs.h>
43 #include <net/sctp/sctp.h>
44 #include <net/sctp/sm.h>
45 
46 /* Forward declarations for internal helpers.  */
47 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
48 					      struct sctp_ulpevent *);
49 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
50 					      struct sctp_ulpevent *);
51 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
52 
53 /* 1st Level Abstractions */
54 
55 /* Initialize a ULP queue from a block of memory.  */
56 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
57 				 struct sctp_association *asoc)
58 {
59 	memset(ulpq, 0, sizeof(struct sctp_ulpq));
60 
61 	ulpq->asoc = asoc;
62 	skb_queue_head_init(&ulpq->reasm);
63 	skb_queue_head_init(&ulpq->reasm_uo);
64 	skb_queue_head_init(&ulpq->lobby);
65 	ulpq->pd_mode  = 0;
66 
67 	return ulpq;
68 }
69 
70 
71 /* Flush the reassembly and ordering queues.  */
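/* Each queued sk_buff carries its sctp_ulpevent in skb->cb;
 * sctp_ulpevent_free() releases the event's data and frees the skb itself.
 */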
72 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
73 {
74 	struct sk_buff *skb;
75 	struct sctp_ulpevent *event;
76 
77 	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
78 		event = sctp_skb2event(skb);
79 		sctp_ulpevent_free(event);
80 	}
81 
82 	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
83 		event = sctp_skb2event(skb);
84 		sctp_ulpevent_free(event);
85 	}
86 
87 	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
88 		event = sctp_skb2event(skb);
89 		sctp_ulpevent_free(event);
90 	}
91 }
92 
93 /* Dispose of a ulpqueue.  */
94 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
95 {
96 	sctp_ulpq_flush(ulpq);
97 }
98 
99 /* Process an incoming DATA chunk.  */
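/* Returns 1 if a complete message (MSG_EOR set) was handed to the ULP,
 * 0 if the chunk was only queued for reassembly/ordering or delivered
 * without EOR, and -ENOMEM if no event could be allocated.
 */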
100 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
101 			gfp_t gfp)
102 {
103 	struct sk_buff_head temp;
104 	struct sctp_ulpevent *event;
105 	int event_eor = 0;
106 
107 	/* Create an event from the incoming chunk. */
108 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
109 	if (!event)
110 		return -ENOMEM;
111 
112 	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
113 	event->ppid = chunk->subh.data_hdr->ppid;
114 
115 	/* Do reassembly if needed.  */
116 	event = sctp_ulpq_reasm(ulpq, event);
117 
118 	/* Do ordering if needed.  */
119 	if (event) {
120 		/* Create a temporary list to collect chunks on.  */
121 		skb_queue_head_init(&temp);
122 		__skb_queue_tail(&temp, sctp_event2skb(event));
123 
124 		if (event->msg_flags & MSG_EOR)
125 			event = sctp_ulpq_order(ulpq, event);
126 	}
127 
128 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
129 	 * very first SKB on the 'temp' list.
130 	 */
131 	if (event) {
132 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
133 		sctp_ulpq_tail_event(ulpq, &temp);
134 	}
135 
136 	return event_eor;
137 }
138 
139 /* Add a new event for propagation to the ULP.  */
140 /* Clear the partial delivery mode for this socket.   Note: This
141  * assumes that no association is currently in partial delivery mode.
142  */
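/* Returns 1 when the last association has left partial delivery and the
 * whole lobby was flushed onto sk_receive_queue; 0 otherwise.
 */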
143 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
144 {
145 	struct sctp_sock *sp = sctp_sk(sk);
146 
147 	if (atomic_dec_and_test(&sp->pd_mode)) {
148 		/* This means there are no other associations in PD, so
149 		 * we can go ahead and clear out the lobby in one shot
150 		 */
151 		if (!skb_queue_empty(&sp->pd_lobby)) {
152 			skb_queue_splice_tail_init(&sp->pd_lobby,
153 						   &sk->sk_receive_queue);
154 			return 1;
155 		}
156 	} else {
157 		/* There are other associations in PD, so we only need to
158 		 * pull stuff out of the lobby that belongs to the
159 		 * association that is exiting PD (all of its notifications
160 		 * are posted here).
161 		 */
162 		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
163 			struct sk_buff *skb, *tmp;
164 			struct sctp_ulpevent *event;
165 
166 			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
167 				event = sctp_skb2event(skb);
168 				if (event->asoc == asoc) {
169 					__skb_unlink(skb, &sp->pd_lobby);
170 					__skb_queue_tail(&sk->sk_receive_queue,
171 							 skb);
172 				}
173 			}
174 		}
175 	}
176 
177 	return 0;
178 }
179 
180 /* Set the pd_mode on the socket and ulpq */
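/* sp->pd_mode counts how many associations on the socket are currently in
 * partial delivery, while ulpq->pd_mode is a per-association flag.
 */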
181 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
182 {
183 	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
184 
185 	atomic_inc(&sp->pd_mode);
186 	ulpq->pd_mode = 1;
187 }
188 
189 /* Clear the pd_mode and release any messages that were waiting for delivery. */
190 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
191 {
192 	ulpq->pd_mode = 0;
193 	sctp_ulpq_reasm_drain(ulpq);
194 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
195 }
196 
197 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
198 {
199 	struct sock *sk = ulpq->asoc->base.sk;
200 	struct sctp_sock *sp = sctp_sk(sk);
201 	struct sctp_ulpevent *event;
202 	struct sk_buff_head *queue;
203 	struct sk_buff *skb;
204 	int clear_pd = 0;
205 
206 	skb = __skb_peek(skb_list);
207 	event = sctp_skb2event(skb);
208 
209 	/* If the socket is just going to throw this away, do not
210 	 * even try to deliver it.
211 	 */
212 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
213 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
214 	     !sctp_ulpevent_is_notification(event)))
215 		goto out_free;
216 
217 	if (!sctp_ulpevent_is_notification(event)) {
218 		sk_mark_napi_id(sk, skb);
219 		sk_incoming_cpu_update(sk);
220 	}
221 	/* Check if the user wishes to receive this event.  */
222 	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
223 		goto out_free;
224 
225 	/* If we are in partial delivery mode, post to the lobby until
226 	 * partial delivery is cleared, unless, of course, _this_ association
227 	 * is the cause of the partial delivery.
228 	 */
229 
230 	if (atomic_read(&sp->pd_mode) == 0) {
231 		queue = &sk->sk_receive_queue;
232 	} else {
233 		if (ulpq->pd_mode) {
234 			/* If the association is in partial delivery, we
235 			 * need to finish delivering the partially processed
236 			 * packet before passing any other data.  This is
237 			 * because we don't truly support stream interleaving.
238 			 */
239 			if ((event->msg_flags & MSG_NOTIFICATION) ||
240 			    (SCTP_DATA_NOT_FRAG ==
241 				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
242 				queue = &sp->pd_lobby;
243 			else {
244 				clear_pd = event->msg_flags & MSG_EOR;
245 				queue = &sk->sk_receive_queue;
246 			}
247 		} else {
248 			/*
249 			 * If fragment interleave is enabled, we
250 			 * can queue this to the receive queue instead
251 			 * of the lobby.
252 			 */
253 			if (sp->frag_interleave)
254 				queue = &sk->sk_receive_queue;
255 			else
256 				queue = &sp->pd_lobby;
257 		}
258 	}
259 
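	/* The whole skb_list (the event plus anything the ordering code
	 * gathered behind it) is spliced as one unit, so a reassembled or
	 * reordered run reaches the chosen queue atomically.
	 */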
260 	skb_queue_splice_tail_init(skb_list, queue);
261 
262 	/* Did we just complete partial delivery and need to get
263 	 * rolling again?  Move pending data to the receive
264 	 * queue.
265 	 */
266 	if (clear_pd)
267 		sctp_ulpq_clear_pd(ulpq);
268 
269 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
270 		if (!sock_owned_by_user(sk))
271 			sp->data_ready_signalled = 1;
272 		sk->sk_data_ready(sk);
273 	}
274 	return 1;
275 
276 out_free:
277 	if (skb_list)
278 		sctp_queue_purge_ulpevents(skb_list);
279 	else
280 		sctp_ulpevent_free(event);
281 
282 	return 0;
283 }
284 
285 /* 2nd Level Abstractions */
286 
287 /* Helper function to store chunks that need to be reassembled.  */
288 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
289 					 struct sctp_ulpevent *event)
290 {
291 	struct sk_buff *pos;
292 	struct sctp_ulpevent *cevent;
293 	__u32 tsn, ctsn;
294 
295 	tsn = event->tsn;
296 
297 	/* See if it belongs at the end. */
298 	pos = skb_peek_tail(&ulpq->reasm);
299 	if (!pos) {
300 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
301 		return;
302 	}
303 
304 	/* Short circuit: if it sorts after the current tail, just append it. */
305 	cevent = sctp_skb2event(pos);
306 	ctsn = cevent->tsn;
307 	if (TSN_lt(ctsn, tsn)) {
308 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
309 		return;
310 	}
311 
312 	/* Find the right place in this list. We store them by TSN.  */
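	/* TSN_lt() uses serial-number (mod 2^32) comparison, so the sort
	 * stays correct across TSN wraparound.
	 */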
313 	skb_queue_walk(&ulpq->reasm, pos) {
314 		cevent = sctp_skb2event(pos);
315 		ctsn = cevent->tsn;
316 
317 		if (TSN_lt(tsn, ctsn))
318 			break;
319 	}
320 
321 	/* Insert before pos. */
322 	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
323 
324 }
325 
326 /* Helper function to return an event corresponding to the reassembled
327  * datagram.
328  * This routine creates a reassembled skb given the first and last skbs
329  * as stored in the reassembly queue.  The skbs may be non-linear if the
330  * SCTP payload was fragmented on the way and IP had to reassemble them.
331  * We add the remaining skbs to the first skb's frag_list.
332  */
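/* Returns NULL (leaving the queue untouched) if an skb copy is needed but
 * cannot be allocated, so the caller can simply retry later.
 */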
333 struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
334 						  struct sk_buff_head *queue,
335 						  struct sk_buff *f_frag,
336 						  struct sk_buff *l_frag)
337 {
338 	struct sk_buff *pos;
339 	struct sk_buff *new = NULL;
340 	struct sctp_ulpevent *event;
341 	struct sk_buff *pnext, *last;
342 	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
343 
344 	/* Store the pointer to the 2nd skb */
345 	if (f_frag == l_frag)
346 		pos = NULL;
347 	else
348 		pos = f_frag->next;
349 
350 	/* Get the last skb in f_frag's frag_list, if present. */
351 	for (last = list; list; last = list, list = list->next)
352 		;
353 
354 	/* Add the list of remaining fragments to the first fragment's
355 	 * frag_list.
356 	 */
357 	if (last)
358 		last->next = pos;
359 	else {
360 		if (skb_cloned(f_frag)) {
361 			/* This is a cloned skb, we can't just modify
362 			 * the frag_list.  We need a new skb to do that.
363 			 * Instead of calling skb_unshare(), we'll do it
364 			 * ourselves since we need to delay the free.
365 			 */
366 			new = skb_copy(f_frag, GFP_ATOMIC);
367 			if (!new)
368 				return NULL;	/* try again later */
369 
370 			sctp_skb_set_owner_r(new, f_frag->sk);
371 
372 			skb_shinfo(new)->frag_list = pos;
373 		} else
374 			skb_shinfo(f_frag)->frag_list = pos;
375 	}
376 
377 	/* Remove the first fragment from the reassembly queue.  */
378 	__skb_unlink(f_frag, queue);
379 
380 	/* if we did unshare, then free the old skb and re-assign */
381 	if (new) {
382 		kfree_skb(f_frag);
383 		f_frag = new;
384 	}
385 
386 	while (pos) {
387 
388 		pnext = pos->next;
389 
390 		/* Update the len and data_len fields of the first fragment. */
391 		f_frag->len += pos->len;
392 		f_frag->data_len += pos->len;
393 
394 		/* Remove the fragment from the reassembly queue.  */
395 		__skb_unlink(pos, queue);
396 
397 		/* Break if we have reached the last fragment.  */
398 		if (pos == l_frag)
399 			break;
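		/* __skb_unlink() cleared pos->next; restore it so the
		 * frag_list chain stays intact.  The last fragment is left
		 * with next == NULL, terminating the list.
		 */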
400 		pos->next = pnext;
401 		pos = pnext;
402 	}
403 
404 	event = sctp_skb2event(f_frag);
405 	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
406 
407 	return event;
408 }
409 
410 
411 /* Helper function to check if an incoming chunk has filled up the last
412  * missing fragment in a SCTP datagram and return the corresponding event.
413  */
414 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
415 {
416 	struct sk_buff *pos;
417 	struct sctp_ulpevent *cevent;
418 	struct sk_buff *first_frag = NULL;
419 	__u32 ctsn, next_tsn;
420 	struct sctp_ulpevent *retval = NULL;
421 	struct sk_buff *pd_first = NULL;
422 	struct sk_buff *pd_last = NULL;
423 	size_t pd_len = 0;
424 	struct sctp_association *asoc;
425 	u32 pd_point;
426 
427 	/* Initialized to 0 just to avoid compiler warning message.  Will
428 	 * never be used with this value. It is referenced only after it
429 	 * is set when we find the first fragment of a message.
430 	 */
431 	next_tsn = 0;
432 
433 	/* The chunks are held in the reasm queue sorted by TSN.
434 	 * Walk through the queue sequentially and look for a sequence of
435 	 * fragmented chunks that complete a datagram.
436 	 * 'first_frag' and next_tsn are reset when we find a chunk which
437 	 * is the first fragment of a datagram. Once these 2 fields are set
438 	 * we expect to find the remaining middle fragments and the last
439 	 * fragment in order. If not, first_frag is reset to NULL and we
440 	 * start the next pass when we find another first fragment.
441 	 *
442 	 * There is a potential to do partial delivery if the user sets the
443 	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
444 	 * to see if we can do PD.
445 	 */
446 	skb_queue_walk(&ulpq->reasm, pos) {
447 		cevent = sctp_skb2event(pos);
448 		ctsn = cevent->tsn;
449 
450 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
451 		case SCTP_DATA_FIRST_FRAG:
452 			/* If this "FIRST_FRAG" is the first
453 			 * element in the queue, then count it towards
454 			 * possible PD.
455 			 */
456 			if (skb_queue_is_first(&ulpq->reasm, pos)) {
457 			    pd_first = pos;
458 			    pd_last = pos;
459 			    pd_len = pos->len;
460 			} else {
461 			    pd_first = NULL;
462 			    pd_last = NULL;
463 			    pd_len = 0;
464 			}
465 
466 			first_frag = pos;
467 			next_tsn = ctsn + 1;
468 			break;
469 
470 		case SCTP_DATA_MIDDLE_FRAG:
471 			if ((first_frag) && (ctsn == next_tsn)) {
472 				next_tsn++;
473 				if (pd_first) {
474 				    pd_last = pos;
475 				    pd_len += pos->len;
476 				}
477 			} else
478 				first_frag = NULL;
479 			break;
480 
481 		case SCTP_DATA_LAST_FRAG:
482 			if (first_frag && (ctsn == next_tsn))
483 				goto found;
484 			else
485 				first_frag = NULL;
486 			break;
487 		}
488 	}
489 
490 	asoc = ulpq->asoc;
491 	if (pd_first) {
492 		/* Make sure we can enter partial delivery.
493 		 * We can trigger partial delivery only if fragment
494 		 * interleave is set, or the socket is not already
495 		 * in partial delivery.
496 		 */
497 		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
498 		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
499 			goto done;
500 
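		/* pd_len covers the in-sequence run of fragments at the
		 * head of the queue.  If it reaches the user's partial
		 * delivery point, hand that run to the ULP now and enter
		 * partial delivery mode.
		 */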
501 		cevent = sctp_skb2event(pd_first);
502 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
503 		if (pd_point && pd_point <= pd_len) {
504 			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
505 							     &ulpq->reasm,
506 							     pd_first,
507 							     pd_last);
508 			if (retval)
509 				sctp_ulpq_set_pd(ulpq);
510 		}
511 	}
512 done:
513 	return retval;
514 found:
515 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
516 					     &ulpq->reasm, first_frag, pos);
517 	if (retval)
518 		retval->msg_flags |= MSG_EOR;
519 	goto done;
520 }
521 
522 /* Retrieve the next set of fragments of a partial message. */
523 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
524 {
525 	struct sk_buff *pos, *last_frag, *first_frag;
526 	struct sctp_ulpevent *cevent;
527 	__u32 ctsn, next_tsn;
528 	int is_last;
529 	struct sctp_ulpevent *retval;
530 
531 	/* The chunks are held in the reasm queue sorted by TSN.
532 	 * Walk through the queue sequentially and look for the first
533 	 * sequence of fragmented chunks.
534 	 */
535 
536 	if (skb_queue_empty(&ulpq->reasm))
537 		return NULL;
538 
539 	last_frag = first_frag = NULL;
540 	retval = NULL;
541 	next_tsn = 0;
542 	is_last = 0;
543 
544 	skb_queue_walk(&ulpq->reasm, pos) {
545 		cevent = sctp_skb2event(pos);
546 		ctsn = cevent->tsn;
547 
548 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
549 		case SCTP_DATA_FIRST_FRAG:
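			/* A first fragment starts a new message.  With
			 * nothing collected yet there is no partial message
			 * to continue; otherwise deliver the run gathered
			 * so far.
			 */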
550 			if (!first_frag)
551 				return NULL;
552 			goto done;
553 		case SCTP_DATA_MIDDLE_FRAG:
554 			if (!first_frag) {
555 				first_frag = pos;
556 				next_tsn = ctsn + 1;
557 				last_frag = pos;
558 			} else if (next_tsn == ctsn) {
559 				next_tsn++;
560 				last_frag = pos;
561 			} else
562 				goto done;
563 			break;
564 		case SCTP_DATA_LAST_FRAG:
565 			if (!first_frag)
566 				first_frag = pos;
567 			else if (ctsn != next_tsn)
568 				goto done;
569 			last_frag = pos;
570 			is_last = 1;
571 			goto done;
572 		default:
573 			return NULL;
574 		}
575 	}
576 
577 	/* We have the reassembled event. There is no need to look
578 	 * further.
579 	 */
580 done:
581 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
582 					&ulpq->reasm, first_frag, last_frag);
583 	if (retval && is_last)
584 		retval->msg_flags |= MSG_EOR;
585 
586 	return retval;
587 }
588 
589 
590 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
591  * need reassembling.
592  */
593 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
594 						struct sctp_ulpevent *event)
595 {
596 	struct sctp_ulpevent *retval = NULL;
597 
598 	/* Check if this is part of a fragmented message.  */
599 	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
600 		event->msg_flags |= MSG_EOR;
601 		return event;
602 	}
603 
604 	sctp_ulpq_store_reasm(ulpq, event);
605 	if (!ulpq->pd_mode)
606 		retval = sctp_ulpq_retrieve_reassembled(ulpq);
607 	else {
608 		__u32 ctsn, ctsnap;
609 
610 		/* Do not even bother unless this is the next tsn to
611 		 * be delivered.
612 		 */
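		/* (i.e. the event's TSN is already covered by the peer's
		 * cumulative TSN ack point, so no gaps remain in front of
		 * the fragments we would pull.)
		 */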
613 		ctsn = event->tsn;
614 		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
615 		if (TSN_lte(ctsn, ctsnap))
616 			retval = sctp_ulpq_retrieve_partial(ulpq);
617 	}
618 
619 	return retval;
620 }
621 
622 /* Retrieve the first part (sequential fragments) for partial delivery.  */
623 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
624 {
625 	struct sk_buff *pos, *last_frag, *first_frag;
626 	struct sctp_ulpevent *cevent;
627 	__u32 ctsn, next_tsn;
628 	struct sctp_ulpevent *retval;
629 
630 	/* The chunks are held in the reasm queue sorted by TSN.
631 	 * Walk through the queue sequentially and look for a sequence of
632 	 * fragmented chunks that start a datagram.
633 	 */
634 
635 	if (skb_queue_empty(&ulpq->reasm))
636 		return NULL;
637 
638 	last_frag = first_frag = NULL;
639 	retval = NULL;
640 	next_tsn = 0;
641 
642 	skb_queue_walk(&ulpq->reasm, pos) {
643 		cevent = sctp_skb2event(pos);
644 		ctsn = cevent->tsn;
645 
646 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
647 		case SCTP_DATA_FIRST_FRAG:
648 			if (!first_frag) {
649 				first_frag = pos;
650 				next_tsn = ctsn + 1;
651 				last_frag = pos;
652 			} else
653 				goto done;
654 			break;
655 
656 		case SCTP_DATA_MIDDLE_FRAG:
657 			if (!first_frag)
658 				return NULL;
659 			if (ctsn == next_tsn) {
660 				next_tsn++;
661 				last_frag = pos;
662 			} else
663 				goto done;
664 			break;
665 
666 		case SCTP_DATA_LAST_FRAG:
667 			if (!first_frag)
668 				return NULL;
669 			else
670 				goto done;
671 			break;
672 
673 		default:
674 			return NULL;
675 		}
676 	}
677 
678 	/* We have the reassembled event. There is no need to look
679 	 * further.
680 	 */
681 done:
682 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
683 					&ulpq->reasm, first_frag, last_frag);
684 	return retval;
685 }
686 
687 /*
688  * Flush out stale fragments from the reassembly queue when processing
689  * a Forward TSN.
690  *
691  * RFC 3758, Section 3.6
692  *
693  * After receiving and processing a FORWARD TSN, the data receiver MUST
694  * take cautions in updating its re-assembly queue.  The receiver MUST
695  * remove any partially reassembled message, which is still missing one
696  * or more TSNs earlier than or equal to the new cumulative TSN point.
697  * In the event that the receiver has invoked the partial delivery API,
698  * a notification SHOULD also be generated to inform the upper layer API
699  * that the message being partially delivered will NOT be completed.
700  */
701 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
702 {
703 	struct sk_buff *pos, *tmp;
704 	struct sctp_ulpevent *event;
705 	__u32 tsn;
706 
707 	if (skb_queue_empty(&ulpq->reasm))
708 		return;
709 
710 	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
711 		event = sctp_skb2event(pos);
712 		tsn = event->tsn;
713 
714 		/* Since the entire message must be abandoned by the
715 		 * sender (item A3 in Section 3.5, RFC 3758), we can
716 		 * free all fragments on the list that are less than
717 		 * or equal to the ctsn_point.
718 		 */
719 		if (TSN_lte(tsn, fwd_tsn)) {
720 			__skb_unlink(pos, &ulpq->reasm);
721 			sctp_ulpevent_free(event);
722 		} else
723 			break;
724 	}
725 }
726 
727 /*
728  * Drain the reassembly queue.  If we just cleared partial delivery, it
729  * is possible that the reassembly queue will contain already reassembled
730  * messages.  Retrieve any such messages and give them to the user.
731  */
732 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
733 {
734 	struct sctp_ulpevent *event = NULL;
735 
736 	if (skb_queue_empty(&ulpq->reasm))
737 		return;
738 
739 	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
740 		struct sk_buff_head temp;
741 
742 		skb_queue_head_init(&temp);
743 		__skb_queue_tail(&temp, sctp_event2skb(event));
744 
745 		/* Do ordering if needed.  */
746 		if (event->msg_flags & MSG_EOR)
747 			event = sctp_ulpq_order(ulpq, event);
748 
749 		/* Send event to the ULP.  'event' is the
750 		 * sctp_ulpevent for the very first SKB on the 'temp' list.
751 		 */
752 		if (event)
753 			sctp_ulpq_tail_event(ulpq, &temp);
754 	}
755 }
756 
757 
758 /* Helper function to gather skbs that have possibly become
759  * ordered by an incoming chunk.
760  */
761 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
762 					      struct sctp_ulpevent *event)
763 {
764 	struct sk_buff_head *event_list;
765 	struct sk_buff *pos, *tmp;
766 	struct sctp_ulpevent *cevent;
767 	struct sctp_stream *stream;
768 	__u16 sid, csid, cssn;
769 
770 	sid = event->stream;
771 	stream  = &ulpq->asoc->stream;
772 
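	/* The event's skb sits at the head of the temporary list built by
	 * the caller, so its prev pointer still points at that list head.
	 * Recover the list so any newly ordered skbs can be appended to it.
	 */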
773 	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
774 
775 	/* We are holding the chunks by stream, by SSN.  */
776 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
777 		cevent = (struct sctp_ulpevent *) pos->cb;
778 		csid = cevent->stream;
779 		cssn = cevent->ssn;
780 
781 		/* Have we gone too far?  */
782 		if (csid > sid)
783 			break;
784 
785 		/* Have we not gone far enough?  */
786 		if (csid < sid)
787 			continue;
788 
789 		if (cssn != sctp_ssn_peek(stream, in, sid))
790 			break;
791 
792 		/* Found it, so mark in the stream. */
793 		sctp_ssn_next(stream, in, sid);
794 
795 		__skb_unlink(pos, &ulpq->lobby);
796 
797 		/* Attach all gathered skbs to the event.  */
798 		__skb_queue_tail(event_list, pos);
799 	}
800 }
801 
802 /* Helper function to store chunks needing ordering.  */
803 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
804 					   struct sctp_ulpevent *event)
805 {
806 	struct sk_buff *pos;
807 	struct sctp_ulpevent *cevent;
808 	__u16 sid, csid;
809 	__u16 ssn, cssn;
810 
811 	pos = skb_peek_tail(&ulpq->lobby);
812 	if (!pos) {
813 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
814 		return;
815 	}
816 
817 	sid = event->stream;
818 	ssn = event->ssn;
819 
820 	cevent = (struct sctp_ulpevent *) pos->cb;
821 	csid = cevent->stream;
822 	cssn = cevent->ssn;
823 	if (sid > csid) {
824 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
825 		return;
826 	}
827 
828 	if ((sid == csid) && SSN_lt(cssn, ssn)) {
829 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
830 		return;
831 	}
832 
833 	/* Find the right place in this list.  We store them by
834 	 * stream ID and then by SSN.
835 	 */
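	/* SSN_lt() uses serial (mod 2^16) comparison, so the ordering
	 * survives stream sequence number wraparound.
	 */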
836 	skb_queue_walk(&ulpq->lobby, pos) {
837 		cevent = (struct sctp_ulpevent *) pos->cb;
838 		csid = cevent->stream;
839 		cssn = cevent->ssn;
840 
841 		if (csid > sid)
842 			break;
843 		if (csid == sid && SSN_lt(ssn, cssn))
844 			break;
845 	}
846 
847 
848 	/* Insert before pos. */
849 	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
850 }
851 
852 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
853 					     struct sctp_ulpevent *event)
854 {
855 	__u16 sid, ssn;
856 	struct sctp_stream *stream;
857 
858 	/* Check if this message needs ordering.  */
859 	if (event->msg_flags & SCTP_DATA_UNORDERED)
860 		return event;
861 
862 	/* Note: The stream ID must be verified before this routine.  */
863 	sid = event->stream;
864 	ssn = event->ssn;
865 	stream  = &ulpq->asoc->stream;
866 
867 	/* Is this the expected SSN for this stream ID?  */
868 	if (ssn != sctp_ssn_peek(stream, in, sid)) {
869 		/* We've received something out of order, so find where it
870 		 * needs to be placed.  We order by stream and then by SSN.
871 		 */
872 		sctp_ulpq_store_ordered(ulpq, event);
873 		return NULL;
874 	}
875 
876 	/* Mark that the next chunk has been found.  */
877 	sctp_ssn_next(stream, in, sid);
878 
879 	/* Go find any other chunks that were waiting for
880 	 * ordering.
881 	 */
882 	sctp_ulpq_retrieve_ordered(ulpq, event);
883 
884 	return event;
885 }
886 
887 /* Helper function to gather skbs that have possibly become
888  * ordered because a Forward TSN has skipped their dependencies.
889  */
890 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
891 {
892 	struct sk_buff *pos, *tmp;
893 	struct sctp_ulpevent *cevent;
894 	struct sctp_ulpevent *event;
895 	struct sctp_stream *stream;
896 	struct sk_buff_head temp;
897 	struct sk_buff_head *lobby = &ulpq->lobby;
898 	__u16 csid, cssn;
899 
900 	stream = &ulpq->asoc->stream;
901 
902 	/* We are holding the chunks by stream, by SSN.  */
903 	skb_queue_head_init(&temp);
904 	event = NULL;
905 	sctp_skb_for_each(pos, lobby, tmp) {
906 		cevent = (struct sctp_ulpevent *) pos->cb;
907 		csid = cevent->stream;
908 		cssn = cevent->ssn;
909 
910 		/* Have we gone too far?  */
911 		if (csid > sid)
912 			break;
913 
914 		/* Have we not gone far enough?  */
915 		if (csid < sid)
916 			continue;
917 
918 		/* see if this ssn has been marked by skipping */
919 		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
920 			break;
921 
922 		__skb_unlink(pos, lobby);
923 		if (!event)
924 			/* Create a temporary list to collect chunks on.  */
925 			event = sctp_skb2event(pos);
926 
927 		/* Attach all gathered skbs to the event.  */
928 		__skb_queue_tail(&temp, pos);
929 	}
930 
931 	/* If we didn't reap any data, see if the next expected SSN
932 	 * is next on the queue and if so, use that.
933 	 */
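	/* If the walk above stopped on a real entry (rather than wrapping
	 * around to the list head), pos is the first event we left in the
	 * lobby; check whether it carries the exact SSN we now expect.
	 */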
934 	if (event == NULL && pos != (struct sk_buff *)lobby) {
935 		cevent = (struct sctp_ulpevent *) pos->cb;
936 		csid = cevent->stream;
937 		cssn = cevent->ssn;
938 
939 		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
940 			sctp_ssn_next(stream, in, csid);
941 			__skb_unlink(pos, lobby);
942 			__skb_queue_tail(&temp, pos);
943 			event = sctp_skb2event(pos);
944 		}
945 	}
946 
947 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
948 	 * very first SKB on the 'temp' list.
949 	 */
950 	if (event) {
951 		/* see if we have more ordered data that we can deliver */
952 		sctp_ulpq_retrieve_ordered(ulpq, event);
953 		sctp_ulpq_tail_event(ulpq, &temp);
954 	}
955 }
956 
957 /* Skip over an SSN.  This is used during the processing of a
958  * Forward TSN chunk to skip over the abandoned ordered data.
959  */
960 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
961 {
962 	struct sctp_stream *stream;
963 
964 	/* Note: The stream ID must be verified before this routine.  */
965 	stream  = &ulpq->asoc->stream;
966 
967 	/* Is this an old SSN?  If so ignore. */
968 	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
969 		return;
970 
971 	/* Mark that we are no longer expecting this SSN or lower. */
972 	sctp_ssn_skip(stream, in, sid, ssn);
973 
974 	/* Go find any other chunks that were waiting for
975 	 * ordering and deliver them if needed.
976 	 */
977 	sctp_ulpq_reap_ordered(ulpq, sid);
978 }
979 
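/* Renege events starting from the tail of @list: free each event, mark
 * its TSNs as no longer received so the peer will retransmit them, and
 * stop once at least @needed bytes have been reclaimed.  Returns the
 * number of bytes actually freed.
 */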
980 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
981 			    __u16 needed)
982 {
983 	__u16 freed = 0;
984 	__u32 tsn, last_tsn;
985 	struct sk_buff *skb, *flist, *last;
986 	struct sctp_ulpevent *event;
987 	struct sctp_tsnmap *tsnmap;
988 
989 	tsnmap = &ulpq->asoc->peer.tsn_map;
990 
991 	while ((skb = skb_peek_tail(list)) != NULL) {
992 		event = sctp_skb2event(skb);
993 		tsn = event->tsn;
994 
995 		/* Don't renege below the Cumulative TSN ACK Point. */
996 		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
997 			break;
998 
999 		/* Events in ordering queue may have multiple fragments
1000 		 * corresponding to additional TSNs.  Sum the total
1001 		 * freed space; find the last TSN.
1002 		 */
1003 		freed += skb_headlen(skb);
1004 		flist = skb_shinfo(skb)->frag_list;
1005 		for (last = flist; flist; flist = flist->next) {
1006 			last = flist;
1007 			freed += skb_headlen(last);
1008 		}
1009 		if (last)
1010 			last_tsn = sctp_skb2event(last)->tsn;
1011 		else
1012 			last_tsn = tsn;
1013 
1014 		/* Unlink the event, then renege all applicable TSNs. */
1015 		__skb_unlink(skb, list);
1016 		sctp_ulpevent_free(event);
1017 		while (TSN_lte(tsn, last_tsn)) {
1018 			sctp_tsnmap_renege(tsnmap, tsn);
1019 			tsn++;
1020 		}
1021 		if (freed >= needed)
1022 			return freed;
1023 	}
1024 
1025 	return freed;
1026 }
1027 
1028 /* Renege 'needed' bytes from the ordering queue. */
1029 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1030 {
1031 	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1032 }
1033 
1034 /* Renege 'needed' bytes from the reassembly queue. */
1035 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1036 {
1037 	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1038 }
1039 
1040 /* Partially deliver the first message as there is pressure on the rwnd. */
1041 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1042 				gfp_t gfp)
1043 {
1044 	struct sctp_ulpevent *event;
1045 	struct sctp_association *asoc;
1046 	struct sctp_sock *sp;
1047 	__u32 ctsn;
1048 	struct sk_buff *skb;
1049 
1050 	asoc = ulpq->asoc;
1051 	sp = sctp_sk(asoc->base.sk);
1052 
1053 	/* If the association is already in Partial Delivery mode
1054 	 * we have nothing to do.
1055 	 */
1056 	if (ulpq->pd_mode)
1057 		return;
1058 
1059 	/* Data must be at or below the Cumulative TSN ACK Point to
1060 	 * start partial delivery.
1061 	 */
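	/* i.e. the first queued fragment must already be covered by the
	 * cumulative TSN ack point, so no hole precedes it.
	 */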
1062 	skb = skb_peek(&asoc->ulpq.reasm);
1063 	if (skb != NULL) {
1064 		ctsn = sctp_skb2event(skb)->tsn;
1065 		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1066 			return;
1067 	}
1068 
1069 	/* If the user enabled the fragment interleave socket option,
1070 	 * multiple associations can enter partial delivery.
1071 	 * Otherwise, we can only enter partial delivery if the
1072 	 * socket is not in partial delivery mode.
1073 	 */
1074 	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1075 		/* Is partial delivery possible?  */
1076 		event = sctp_ulpq_retrieve_first(ulpq);
1077 		/* Send event to the ULP.   */
1078 		if (event) {
1079 			struct sk_buff_head temp;
1080 
1081 			skb_queue_head_init(&temp);
1082 			__skb_queue_tail(&temp, sctp_event2skb(event));
1083 			sctp_ulpq_tail_event(ulpq, &temp);
1084 			sctp_ulpq_set_pd(ulpq);
1085 			return;
1086 		}
1087 	}
1088 }
1089 
1090 /* Renege some packets to make room for an incoming chunk.  */
1091 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1092 		      gfp_t gfp)
1093 {
1094 	struct sctp_association *asoc = ulpq->asoc;
1095 	__u32 freed = 0;
1096 	__u16 needed;
1097 
1098 	needed = ntohs(chunk->chunk_hdr->length) -
1099 		 sizeof(struct sctp_data_chunk);
1100 
1101 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1102 		freed = sctp_ulpq_renege_order(ulpq, needed);
1103 		if (freed < needed)
1104 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1105 	}
1106 	/* If able to free enough room, accept this chunk. */
1107 	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1108 	    freed >= needed) {
1109 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1110 		/*
1111 		 * Enter partial delivery if chunk has not been
1112 		 * delivered; otherwise, drain the reassembly queue.
1113 		 */
1114 		if (retval <= 0)
1115 			sctp_ulpq_partial_delivery(ulpq, gfp);
1116 		else if (retval == 1)
1117 			sctp_ulpq_reasm_drain(ulpq);
1118 	}
1119 
1120 	sk_mem_reclaim(asoc->base.sk);
1121 }
1122 
1123 
1124 
1125 /* Notify the application if an association is aborted and in
1126  * partial delivery mode.  Send up any pending received messages.
1127  */
1128 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1129 {
1130 	struct sctp_ulpevent *ev = NULL;
1131 	struct sctp_sock *sp;
1132 	struct sock *sk;
1133 
1134 	if (!ulpq->pd_mode)
1135 		return;
1136 
1137 	sk = ulpq->asoc->base.sk;
1138 	sp = sctp_sk(sk);
1139 	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1140 				       SCTP_PARTIAL_DELIVERY_EVENT))
1141 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1142 					      SCTP_PARTIAL_DELIVERY_ABORTED,
1143 					      0, 0, 0, gfp);
1144 	if (ev)
1145 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1146 
1147 	/* If there is data waiting, send it up the socket now. */
1148 	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1149 		sp->data_ready_signalled = 1;
1150 		sk->sk_data_ready(sk);
1151 	}
1152 }
1153