/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
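	/* 'malloced' tells sctp_ulpq_free() whether to kfree() this
	 * structure; it stays 0 here since this init was handed the
	 * memory, and a caller that kmalloc()s a ulpq is expected to
	 * set it afterwards.
	 */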
	ulpq->malloced = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 * Returns 1 if the entire lobby was moved to the receive queue.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  If the SKB of 'event' is
 * on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

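	/* For the first skb on a list, skb->prev points back at the
	 * sk_buff_head itself, so this recovers the temporary list the
	 * caller (e.g. sctp_ulpq_tail_data()) collected events on, or
	 * NULL if the skb was never queued.
	 */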
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);
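	/* (The loop body above is intentionally empty: 'last' trails
	 * 'list' by one node and so ends up on the final fragment, if
	 * any.)
	 */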

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign.  */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
			    pd_first = pos;
			    pd_last = pos;
			    pd_len = pos->len;
			} else {
			    pd_first = NULL;
			    pd_last = NULL;
			    pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
				    pd_last = pos;
				    pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
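		/* Only trigger partial delivery once the contiguous run
		 * of leading fragments holds at least pd_point bytes
		 * (the SCTP_PARTIAL_DELIVERY_POINT socket option).
		 */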
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the sctp_ulpevent for
		 * the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

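/* Deliver 'event' in stream order: hand it back if it carries the next
 * expected SSN for its stream (advancing the ssnmap and pulling along any
 * queued successors), otherwise park it in the lobby and return NULL.
 * Unordered messages bypass all of this.
 */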
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
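	/* After the walk above, 'pos' is the list head itself (cast to an
	 * skb) when we fell off the end of the queue, so only dereference
	 * it if it still points at a real skb.
	 */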
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered data that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
	return;
}

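/* Renege events from the tail of 'list' until at least 'needed' bytes have
 * been freed, clearing each reneged TSN from the peer's tsn map as we go.
 * Returns the number of bytes actually freed.
 */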
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(list)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

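		/* The message this chunk belongs to may still be
		 * incomplete; see whether we can start partial delivery
		 * of the first message to keep data moving under rwnd
		 * pressure.
		 */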
		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
	return;
}


/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}
1096