xref: /openbmc/linux/net/sctp/ulpqueue.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /* SCTP kernel reference Implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 Nokia, Inc.
7  * Copyright (c) 2001 La Monte H.P. Yarroll
8  *
9  * This abstraction carries sctp events to the ULP (sockets).
10  *
11  * The SCTP reference implementation is free software;
12  * you can redistribute it and/or modify it under the terms of
13  * the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * The SCTP reference implementation is distributed in the hope that it
18  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19  *                 ************************
20  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21  * See the GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with GNU CC; see the file COPYING.  If not, write to
25  * the Free Software Foundation, 59 Temple Place - Suite 330,
26  * Boston, MA 02111-1307, USA.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <lksctp-developers@lists.sourceforge.net>
31  *
32  * Or submit a bug report through the following website:
33  *    http://www.sf.net/projects/lksctp
34  *
35  * Written or modified by:
36  *    Jon Grimm             <jgrimm@us.ibm.com>
37  *    La Monte H.P. Yarroll <piggy@acm.org>
38  *    Sridhar Samudrala     <sri@us.ibm.com>
39  *
40  * Any bugs reported to us we will try to fix... any fixes shared will
41  * be incorporated into the next SCTP release.
42  */
43 
44 #include <linux/types.h>
45 #include <linux/skbuff.h>
46 #include <net/sock.h>
47 #include <net/sctp/structs.h>
48 #include <net/sctp/sctp.h>
49 #include <net/sctp/sm.h>
50 
51 /* Forward declarations for internal helpers.  */
52 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
53 					      struct sctp_ulpevent *);
54 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
55 					      struct sctp_ulpevent *);
56 
57 /* 1st Level Abstractions */
58 
59 /* Initialize a ULP queue from a block of memory.  */
60 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
61 				 struct sctp_association *asoc)
62 {
63 	memset(ulpq, 0, sizeof(struct sctp_ulpq));
64 
65 	ulpq->asoc = asoc;
66 	skb_queue_head_init(&ulpq->reasm);
67 	skb_queue_head_init(&ulpq->lobby);
68 	ulpq->pd_mode  = 0;
69 	ulpq->malloced = 0;
70 
71 	return ulpq;
72 }
73 
74 
75 /* Flush the reassembly and ordering queues.  */
76 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
77 {
78 	struct sk_buff *skb;
79 	struct sctp_ulpevent *event;
80 
81 	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
82 		event = sctp_skb2event(skb);
83 		sctp_ulpevent_free(event);
84 	}
85 
86 	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
87 		event = sctp_skb2event(skb);
88 		sctp_ulpevent_free(event);
89 	}
90 
91 }
92 
93 /* Dispose of a ulpqueue.  */
94 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
95 {
96 	sctp_ulpq_flush(ulpq);
97 	if (ulpq->malloced)
98 		kfree(ulpq);
99 }
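
/* Editorial sketch (not part of the original file): the ulpq is assumed to
 * be embedded in the association (asoc->ulpq) and driven by the rest of the
 * stack roughly like this:
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);		on association setup
 *	sctp_ulpq_tail_data(&asoc->ulpq, chunk, gfp);	per inbound DATA chunk
 *	sctp_ulpq_flush(&asoc->ulpq);			drop anything still queued
 *	sctp_ulpq_free(&asoc->ulpq);			on association teardown
 */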
100 
101 /* Process an incoming DATA chunk.  */
102 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
103 			gfp_t gfp)
104 {
105 	struct sk_buff_head temp;
106 	sctp_data_chunk_t *hdr;
107 	struct sctp_ulpevent *event;
108 
109 	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
110 
111 	/* Create an event from the incoming chunk. */
112 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
113 	if (!event)
114 		return -ENOMEM;
115 
116 	/* Do reassembly if needed.  */
117 	event = sctp_ulpq_reasm(ulpq, event);
118 
119 	/* Do ordering if needed.  */
120 	if ((event) && (event->msg_flags & MSG_EOR)){
121 		/* Create a temporary list to collect chunks on.  */
122 		skb_queue_head_init(&temp);
123 		__skb_queue_tail(&temp, sctp_event2skb(event));
124 
125 		event = sctp_ulpq_order(ulpq, event);
126 	}
127 
128 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
129 	 * very first SKB on the 'temp' list.
130 	 */
131 	if (event)
132 		sctp_ulpq_tail_event(ulpq, event);
133 
134 	return 0;
135 }
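
/* Editorial note: sctp_ulpq_tail_data() above is the per-DATA-chunk
 * pipeline.  The chunk is wrapped in an event (sctp_ulpevent_make_rcvmsg),
 * pushed through reassembly (sctp_ulpq_reasm), then through stream ordering
 * (sctp_ulpq_order) once a complete message (MSG_EOR) is available, and only
 * then handed to the socket via sctp_ulpq_tail_event().  Anything still
 * incomplete or out of order stays parked on the reasm or lobby queues.
 */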
136 
138 /* Clear the partial delivery mode for this socket.   Note: This
139  * assumes that no association is currently in partial delivery mode.
140  */
141 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
142 {
143 	struct sctp_sock *sp = sctp_sk(sk);
144 
145 	if (atomic_dec_and_test(&sp->pd_mode)) {
146 		/* This means there are no other associations in PD, so
147 		 * we can go ahead and clear out the lobby in one shot
148 		 */
149 		if (!skb_queue_empty(&sp->pd_lobby)) {
150 			struct list_head *list;
151 			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
152 			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
153 			INIT_LIST_HEAD(list);
154 			return 1;
155 		}
156 	} else {
157 		/* There are other associations in PD, so we only need to
158 		 * pull stuff out of the lobby that belongs to the
159 		 * association that is exiting PD (all of its notifications
160 		 * are posted here).
161 		 */
162 		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
163 			struct sk_buff *skb, *tmp;
164 			struct sctp_ulpevent *event;
165 
166 			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
167 				event = sctp_skb2event(skb);
168 				if (event->asoc == asoc) {
169 					__skb_unlink(skb, &sp->pd_lobby);
170 					__skb_queue_tail(&sk->sk_receive_queue,
171 							 skb);
172 				}
173 			}
174 		}
175 	}
176 
177 	return 0;
178 }
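
/* Editorial note: partial delivery (PD) is tracked at two levels.  The
 * socket keeps an atomic count of associations currently in PD
 * (sctp_sock->pd_mode), while each ulpq keeps a pd_mode flag for its own
 * association; sctp_ulpq_set_pd() and sctp_ulpq_clear_pd() below keep the
 * two in step.
 */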
179 
180 /* Set the pd_mode on the socket and ulpq */
181 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
182 {
183 	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
184 
185 	atomic_inc(&sp->pd_mode);
186 	ulpq->pd_mode = 1;
187 }
188 
189 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
190 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
191 {
192 	ulpq->pd_mode = 0;
193 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
194 }
195 
196 /* Add a new event for propagation to the ULP.  If the SKB of 'event'
197  * is on a list, it is the first such member of that list.
198  */
199 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
200 {
201 	struct sock *sk = ulpq->asoc->base.sk;
202 	struct sk_buff_head *queue, *skb_list;
203 	struct sk_buff *skb = sctp_event2skb(event);
204 	int clear_pd = 0;
205 
206 	skb_list = (struct sk_buff_head *) skb->prev;
207 
208 	/* If the socket is just going to throw this away, do not
209 	 * even try to deliver it.
210 	 */
211 	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
212 		goto out_free;
213 
214 	/* Check if the user wishes to receive this event.  */
215 	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
216 		goto out_free;
217 
218 	/* If we are in partial delivery mode, post to the lobby until
219 	 * partial delivery is cleared, unless, of course, _this_ association
220 	 * is the cause of the partial delivery.
221 	 */
222 
223 	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
224 		queue = &sk->sk_receive_queue;
225 	} else {
226 		if (ulpq->pd_mode) {
227 			/* If the association is in partial delivery, we
228 			 * need to finish delivering the partially processed
229 			 * packet before passing any other data.  This is
230 			 * because we don't truly support stream interleaving.
231 			 */
232 			if ((event->msg_flags & MSG_NOTIFICATION) ||
233 			    (SCTP_DATA_NOT_FRAG ==
234 				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
235 				queue = &sctp_sk(sk)->pd_lobby;
236 			else {
237 				clear_pd = event->msg_flags & MSG_EOR;
238 				queue = &sk->sk_receive_queue;
239 			}
240 		} else {
241 			/*
242 			 * If fragment interleave is enabled, we
243 			 * can queue this to the receive queue instead
244 			 * of the lobby.
245 			 */
246 			if (sctp_sk(sk)->frag_interleave)
247 				queue = &sk->sk_receive_queue;
248 			else
249 				queue = &sctp_sk(sk)->pd_lobby;
250 		}
251 	}
252 
253 	/* If we are harvesting multiple skbs they will be
254 	 * collected on a list.
255 	 */
256 	if (skb_list)
257 		sctp_skb_list_tail(skb_list, queue);
258 	else
259 		__skb_queue_tail(queue, skb);
260 
261 	/* Did we just complete partial delivery and need to get
262 	 * rolling again?  Move pending data to the receive
263 	 * queue.
264 	 */
265 	if (clear_pd)
266 		sctp_ulpq_clear_pd(ulpq);
267 
268 	if (queue == &sk->sk_receive_queue)
269 		sk->sk_data_ready(sk, 0);
270 	return 1;
271 
272 out_free:
273 	if (skb_list)
274 		sctp_queue_purge_ulpevents(skb_list);
275 	else
276 		sctp_ulpevent_free(event);
277 
278 	return 0;
279 }
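
/* Editorial summary of the queue selection above, derived from the code:
 *
 *	socket not in PD at all                      -> sk_receive_queue
 *	this assoc in PD, notification or
 *	    unfragmented DATA                        -> pd_lobby
 *	this assoc in PD, fragmented DATA            -> sk_receive_queue,
 *	                                                clearing PD on MSG_EOR
 *	another assoc in PD, frag_interleave set     -> sk_receive_queue
 *	another assoc in PD, frag_interleave unset   -> pd_lobby
 */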
280 
281 /* 2nd Level Abstractions */
282 
283 /* Helper function to store chunks that need to be reassembled.  */
284 static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
285 					 struct sctp_ulpevent *event)
286 {
287 	struct sk_buff *pos;
288 	struct sctp_ulpevent *cevent;
289 	__u32 tsn, ctsn;
290 
291 	tsn = event->tsn;
292 
293 	/* See if it belongs at the end. */
294 	pos = skb_peek_tail(&ulpq->reasm);
295 	if (!pos) {
296 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
297 		return;
298 	}
299 
300 	/* Short-circuit the common case of just appending it at the end. */
301 	cevent = sctp_skb2event(pos);
302 	ctsn = cevent->tsn;
303 	if (TSN_lt(ctsn, tsn)) {
304 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
305 		return;
306 	}
307 
308 	/* Find the right place in this list. We store them by TSN.  */
309 	skb_queue_walk(&ulpq->reasm, pos) {
310 		cevent = sctp_skb2event(pos);
311 		ctsn = cevent->tsn;
312 
313 		if (TSN_lt(tsn, ctsn))
314 			break;
315 	}
316 
317 	/* Insert before pos. */
318 	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
319 
320 }
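
/* Worked example (editorial): if the reasm queue holds fragments with TSNs
 * 10, 11 and 14 and an event for TSN 12 arrives, the tail check fails
 * (12 is not past 14), the walk above stops at 14, and the new event is
 * linked in before it, keeping the queue sorted as 10, 11, 12, 14.  All
 * comparisons use TSN_lt()/TSN_lte(), which handle 32-bit serial-number
 * wraparound.
 */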
321 
322 /* Helper function to return an event corresponding to the reassembled
323  * datagram.
324  * This routine creates a reassembled skb given the first and last skbs
325  * as stored in the reassembly queue.  The skbs may be non-linear if the
326  * SCTP payload was fragmented on the way and IP had to reassemble them.
327  * We add the rest of the skbs to the first skb's frag_list.
328  */
329 static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
330 {
331 	struct sk_buff *pos;
332 	struct sk_buff *new = NULL;
333 	struct sctp_ulpevent *event;
334 	struct sk_buff *pnext, *last;
335 	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
336 
337 	/* Store the pointer to the 2nd skb */
338 	if (f_frag == l_frag)
339 		pos = NULL;
340 	else
341 		pos = f_frag->next;
342 
343 	/* Get the last skb in the f_frag's frag_list if present. */
344 	for (last = list; list; last = list, list = list->next);
345 
346 	/* Add the list of remaining fragments to the first fragments
347 	 * frag_list.
348 	 */
349 	if (last)
350 		last->next = pos;
351 	else {
352 		if (skb_cloned(f_frag)) {
353 			/* This is a cloned skb, we can't just modify
354 			 * the frag_list.  We need a new skb to do that.
355 			 * Instead of calling skb_unshare(), we'll do it
356 			 * ourselves since we need to delay the free.
357 			 */
358 			new = skb_copy(f_frag, GFP_ATOMIC);
359 			if (!new)
360 				return NULL;	/* try again later */
361 
362 			sctp_skb_set_owner_r(new, f_frag->sk);
363 
364 			skb_shinfo(new)->frag_list = pos;
365 		} else
366 			skb_shinfo(f_frag)->frag_list = pos;
367 	}
368 
369 	/* Remove the first fragment from the reassembly queue.  */
370 	__skb_unlink(f_frag, queue);
371 
372 	/* if we did unshare, then free the old skb and re-assign */
373 	if (new) {
374 		kfree_skb(f_frag);
375 		f_frag = new;
376 	}
377 
378 	while (pos) {
379 
380 		pnext = pos->next;
381 
382 		/* Update the len and data_len fields of the first fragment. */
383 		f_frag->len += pos->len;
384 		f_frag->data_len += pos->len;
385 
386 		/* Remove the fragment from the reassembly queue.  */
387 		__skb_unlink(pos, queue);
388 
389 		/* Break if we have reached the last fragment.  */
390 		if (pos == l_frag)
391 			break;
392 		pos->next = pnext;
393 		pos = pnext;
394 	}
395 
396 	event = sctp_skb2event(f_frag);
397 	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
398 
399 	return event;
400 }
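
/* Editorial note: reassembly above never copies payload.  The first
 * fragment's skb becomes the head of the message and the remaining fragment
 * skbs are chained onto skb_shinfo(head)->frag_list, with head->len and
 * head->data_len grown to cover them; the receive path later walks that
 * chain.  The skb_copy() is only needed when the head skb is cloned, since
 * a cloned skb's frag_list must not be modified in place.
 */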
401 
402 
403 /* Helper function to check if an incoming chunk has filled up the last
404  * missing fragment in a SCTP datagram and return the corresponding event.
405  */
406 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
407 {
408 	struct sk_buff *pos;
409 	struct sctp_ulpevent *cevent;
410 	struct sk_buff *first_frag = NULL;
411 	__u32 ctsn, next_tsn;
412 	struct sctp_ulpevent *retval = NULL;
413 	struct sk_buff *pd_first = NULL;
414 	struct sk_buff *pd_last = NULL;
415 	size_t pd_len = 0;
416 	struct sctp_association *asoc;
417 	u32 pd_point;
418 
419 	/* Initialized to 0 just to avoid compiler warning message.  Will
420 	 * never be used with this value. It is referenced only after it
421 	 * is set when we find the first fragment of a message.
422 	 */
423 	next_tsn = 0;
424 
425 	/* The chunks are held in the reasm queue sorted by TSN.
426 	 * Walk through the queue sequentially and look for a sequence of
427 	 * fragmented chunks that complete a datagram.
428 	 * 'first_frag' and next_tsn are reset when we find a chunk which
429 	 * is the first fragment of a datagram. Once these 2 fields are set
430 	 * we expect to find the remaining middle fragments and the last
431 	 * fragment in order. If not, first_frag is reset to NULL and we
432 	 * start the next pass when we find another first fragment.
433 	 *
434 	 * There is a potential to do partial delivery if user sets
435 	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
436 	 * to see if we can do PD.
437 	 */
438 	skb_queue_walk(&ulpq->reasm, pos) {
439 		cevent = sctp_skb2event(pos);
440 		ctsn = cevent->tsn;
441 
442 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
443 		case SCTP_DATA_FIRST_FRAG:
444 			/* If this "FIRST_FRAG" is the first
445 			 * element in the queue, then count it towards
446 			 * possible PD.
447 			 */
448 			if (pos == ulpq->reasm.next) {
449 			    pd_first = pos;
450 			    pd_last = pos;
451 			    pd_len = pos->len;
452 			} else {
453 			    pd_first = NULL;
454 			    pd_last = NULL;
455 			    pd_len = 0;
456 			}
457 
458 			first_frag = pos;
459 			next_tsn = ctsn + 1;
460 			break;
461 
462 		case SCTP_DATA_MIDDLE_FRAG:
463 			if ((first_frag) && (ctsn == next_tsn)) {
464 				next_tsn++;
465 				if (pd_first) {
466 				    pd_last = pos;
467 				    pd_len += pos->len;
468 				}
469 			} else
470 				first_frag = NULL;
471 			break;
472 
473 		case SCTP_DATA_LAST_FRAG:
474 			if (first_frag && (ctsn == next_tsn))
475 				goto found;
476 			else
477 				first_frag = NULL;
478 			break;
479 		}
480 	}
481 
482 	asoc = ulpq->asoc;
483 	if (pd_first) {
484 		/* Make sure we can enter partial delivery.
485 		 * We can trigger partial delivery only if fragment
486 		 * interleave is set, or the socket is not already
487 		 * in partial delivery.
488 		 */
489 		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
490 		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
491 			goto done;
492 
493 		cevent = sctp_skb2event(pd_first);
494 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
495 		if (pd_point && pd_point <= pd_len) {
496 			retval = sctp_make_reassembled_event(&ulpq->reasm,
497 							     pd_first,
498 							     pd_last);
499 			if (retval)
500 				sctp_ulpq_set_pd(ulpq);
501 		}
502 	}
503 done:
504 	return retval;
505 found:
506 	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
507 	if (retval)
508 		retval->msg_flags |= MSG_EOR;
509 	goto done;
510 }
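
/* Worked example (editorial): suppose SCTP_PARTIAL_DELIVERY_POINT is set to
 * 4096 and the head of the reasm queue holds an in-sequence FIRST fragment
 * plus MIDDLE fragments totalling pd_len = 5000 bytes, but no LAST fragment
 * yet.  No complete message is found, so the code above reassembles just
 * that run, returns it without MSG_EOR (the caller delivers it as a partial
 * message) and puts the association into partial delivery mode.
 */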
511 
512 /* Retrieve the next set of fragments of a partial message. */
513 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
514 {
515 	struct sk_buff *pos, *last_frag, *first_frag;
516 	struct sctp_ulpevent *cevent;
517 	__u32 ctsn, next_tsn;
518 	int is_last;
519 	struct sctp_ulpevent *retval;
520 
521 	/* The chunks are held in the reasm queue sorted by TSN.
522 	 * Walk through the queue sequentially and look for the first
523 	 * sequence of fragmented chunks.
524 	 */
525 
526 	if (skb_queue_empty(&ulpq->reasm))
527 		return NULL;
528 
529 	last_frag = first_frag = NULL;
530 	retval = NULL;
531 	next_tsn = 0;
532 	is_last = 0;
533 
534 	skb_queue_walk(&ulpq->reasm, pos) {
535 		cevent = sctp_skb2event(pos);
536 		ctsn = cevent->tsn;
537 
538 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
539 		case SCTP_DATA_MIDDLE_FRAG:
540 			if (!first_frag) {
541 				first_frag = pos;
542 				next_tsn = ctsn + 1;
543 				last_frag = pos;
544 			} else if (next_tsn == ctsn)
545 				next_tsn++;
546 			else
547 				goto done;
548 			break;
549 		case SCTP_DATA_LAST_FRAG:
550 			if (!first_frag)
551 				first_frag = pos;
552 			else if (ctsn != next_tsn)
553 				goto done;
554 			last_frag = pos;
555 			is_last = 1;
556 			goto done;
557 		default:
558 			return NULL;
559 		}
560 	}
561 
562 	/* We have the reassembled event. There is no need to look
563 	 * further.
564 	 */
565 done:
566 	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
567 	if (retval && is_last)
568 		retval->msg_flags |= MSG_EOR;
569 
570 	return retval;
571 }
572 
573 
574 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
575  * need reassembling.
576  */
577 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
578 						struct sctp_ulpevent *event)
579 {
580 	struct sctp_ulpevent *retval = NULL;
581 
582 	/* Check if this is part of a fragmented message.  */
583 	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
584 		event->msg_flags |= MSG_EOR;
585 		return event;
586 	}
587 
588 	sctp_ulpq_store_reasm(ulpq, event);
589 	if (!ulpq->pd_mode)
590 		retval = sctp_ulpq_retrieve_reassembled(ulpq);
591 	else {
592 		__u32 ctsn, ctsnap;
593 
594 		/* Do not even bother unless this is the next tsn to
595 		 * be delivered.
596 		 */
597 		ctsn = event->tsn;
598 		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
599 		if (TSN_lte(ctsn, ctsnap))
600 			retval = sctp_ulpq_retrieve_partial(ulpq);
601 	}
602 
603 	return retval;
604 }
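
/* Editorial note: outside partial delivery the whole reasm queue is searched
 * for a complete message (sctp_ulpq_retrieve_reassembled).  Once this
 * association is already in PD mode we only try to extend the message being
 * delivered (sctp_ulpq_retrieve_partial), and only when the new fragment's
 * TSN is at or below the cumulative TSN point, i.e. when no earlier fragment
 * can still be missing.
 */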
605 
606 /* Retrieve the first part (sequential fragments) for partial delivery.  */
607 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
608 {
609 	struct sk_buff *pos, *last_frag, *first_frag;
610 	struct sctp_ulpevent *cevent;
611 	__u32 ctsn, next_tsn;
612 	struct sctp_ulpevent *retval;
613 
614 	/* The chunks are held in the reasm queue sorted by TSN.
615 	 * Walk through the queue sequentially and look for a sequence of
616 	 * fragmented chunks that start a datagram.
617 	 */
618 
619 	if (skb_queue_empty(&ulpq->reasm))
620 		return NULL;
621 
622 	last_frag = first_frag = NULL;
623 	retval = NULL;
624 	next_tsn = 0;
625 
626 	skb_queue_walk(&ulpq->reasm, pos) {
627 		cevent = sctp_skb2event(pos);
628 		ctsn = cevent->tsn;
629 
630 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
631 		case SCTP_DATA_FIRST_FRAG:
632 			if (!first_frag) {
633 				first_frag = pos;
634 				next_tsn = ctsn + 1;
635 				last_frag = pos;
636 			} else
637 				goto done;
638 			break;
639 
640 		case SCTP_DATA_MIDDLE_FRAG:
641 			if (!first_frag)
642 				return NULL;
643 			if (ctsn == next_tsn) {
644 				next_tsn++;
645 				last_frag = pos;
646 			} else
647 				goto done;
648 			break;
649 		default:
650 			return NULL;
651 		}
652 	}
653 
654 	/* We have the reassembled event. There is no need to look
655 	 * further.
656 	 */
657 done:
658 	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
659 	return retval;
660 }
661 
662 /*
663  * Flush out stale fragments from the reassembly queue when processing
664  * a Forward TSN.
665  *
666  * RFC 3758, Section 3.6
667  *
668  * After receiving and processing a FORWARD TSN, the data receiver MUST
669  * take cautions in updating its re-assembly queue.  The receiver MUST
670  * remove any partially reassembled message, which is still missing one
671  * or more TSNs earlier than or equal to the new cumulative TSN point.
672  * In the event that the receiver has invoked the partial delivery API,
673  * a notification SHOULD also be generated to inform the upper layer API
674  * that the message being partially delivered will NOT be completed.
675  */
676 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
677 {
678 	struct sk_buff *pos, *tmp;
679 	struct sctp_ulpevent *event;
680 	__u32 tsn;
681 
682 	if (skb_queue_empty(&ulpq->reasm))
683 		return;
684 
685 	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
686 		event = sctp_skb2event(pos);
687 		tsn = event->tsn;
688 
689 		/* Since the entire message must be abandoned by the
690 		 * sender (item A3 in Section 3.5, RFC 3758), we can
691 		 * free all fragments on the list that are less than
692 		 * or equal to ctsn_point.
693 		 */
694 		if (TSN_lte(tsn, fwd_tsn)) {
695 			__skb_unlink(pos, &ulpq->reasm);
696 			sctp_ulpevent_free(event);
697 		} else
698 			break;
699 	}
700 }
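
/* Worked example (editorial): if the reasm queue holds fragments with TSNs
 * 20, 21 and 25 and a FORWARD TSN moves the cumulative TSN point up to 23,
 * the walk above frees 20 and 21 and stops at 25, which may still become
 * part of a later, complete message.
 */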
701 
702 /* Helper function to gather skbs that have possibly become
703  * ordered by an incoming chunk.
704  */
705 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
706 					      struct sctp_ulpevent *event)
707 {
708 	struct sk_buff_head *event_list;
709 	struct sk_buff *pos, *tmp;
710 	struct sctp_ulpevent *cevent;
711 	struct sctp_stream *in;
712 	__u16 sid, csid;
713 	__u16 ssn, cssn;
714 
715 	sid = event->stream;
716 	ssn = event->ssn;
717 	in  = &ulpq->asoc->ssnmap->in;
718 
719 	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
720 
721 	/* We are holding the chunks by stream, by SSN.  */
722 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
723 		cevent = (struct sctp_ulpevent *) pos->cb;
724 		csid = cevent->stream;
725 		cssn = cevent->ssn;
726 
727 		/* Have we gone too far?  */
728 		if (csid > sid)
729 			break;
730 
731 		/* Have we not gone far enough?  */
732 		if (csid < sid)
733 			continue;
734 
735 		if (cssn != sctp_ssn_peek(in, sid))
736 			break;
737 
738 		/* Found it, so mark in the ssnmap. */
739 		sctp_ssn_next(in, sid);
740 
741 		__skb_unlink(pos, &ulpq->lobby);
742 
743 		/* Attach all gathered skbs to the event.  */
744 		__skb_queue_tail(event_list, pos);
745 	}
746 }
747 
748 /* Helper function to store chunks needing ordering.  */
749 static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
750 					   struct sctp_ulpevent *event)
751 {
752 	struct sk_buff *pos;
753 	struct sctp_ulpevent *cevent;
754 	__u16 sid, csid;
755 	__u16 ssn, cssn;
756 
757 	pos = skb_peek_tail(&ulpq->lobby);
758 	if (!pos) {
759 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
760 		return;
761 	}
762 
763 	sid = event->stream;
764 	ssn = event->ssn;
765 
766 	cevent = (struct sctp_ulpevent *) pos->cb;
767 	csid = cevent->stream;
768 	cssn = cevent->ssn;
769 	if (sid > csid) {
770 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
771 		return;
772 	}
773 
774 	if ((sid == csid) && SSN_lt(cssn, ssn)) {
775 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
776 		return;
777 	}
778 
779 	/* Find the right place in this list.  We store them by
780 	 * stream ID and then by SSN.
781 	 */
782 	skb_queue_walk(&ulpq->lobby, pos) {
783 		cevent = (struct sctp_ulpevent *) pos->cb;
784 		csid = cevent->stream;
785 		cssn = cevent->ssn;
786 
787 		if (csid > sid)
788 			break;
789 		if (csid == sid && SSN_lt(ssn, cssn))
790 			break;
791 	}
792 
793 
794 	/* Insert before pos. */
795 	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
796 
797 }
798 
799 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
800 					     struct sctp_ulpevent *event)
801 {
802 	__u16 sid, ssn;
803 	struct sctp_stream *in;
804 
805 	/* Check if this message needs ordering.  */
806 	if (SCTP_DATA_UNORDERED & event->msg_flags)
807 		return event;
808 
809 	/* Note: The stream ID must be verified before this routine.  */
810 	sid = event->stream;
811 	ssn = event->ssn;
812 	in  = &ulpq->asoc->ssnmap->in;
813 
814 	/* Is this the expected SSN for this stream ID?  */
815 	if (ssn != sctp_ssn_peek(in, sid)) {
816 		/* We've received something out of order, so find where it
817 		 * needs to be placed.  We order by stream and then by SSN.
818 		 */
819 		sctp_ulpq_store_ordered(ulpq, event);
820 		return NULL;
821 	}
822 
823 	/* Mark that the next chunk has been found.  */
824 	sctp_ssn_next(in, sid);
825 
826 	/* Go find any other chunks that were waiting for
827 	 * ordering.
828 	 */
829 	sctp_ulpq_retrieve_ordered(ulpq, event);
830 
831 	return event;
832 }
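
/* Worked example (editorial): on stream 3 the next expected SSN is 7, and
 * events with SSNs 8 and 9 are already parked in the lobby.  When SSN 7
 * arrives, sctp_ulpq_order() accepts it, sctp_ssn_next() advances the
 * expectation, and sctp_ulpq_retrieve_ordered() pulls 8 and then 9 out of
 * the lobby and attaches them to the same delivery, so the ULP sees the
 * three messages in order in one pass.
 */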
833 
834 /* Helper function to gather skbs that have possibly become
835  * ordered by a Forward TSN skipping their dependencies.
836  */
837 static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
838 {
839 	struct sk_buff *pos, *tmp;
840 	struct sctp_ulpevent *cevent;
841 	struct sctp_ulpevent *event;
842 	struct sctp_stream *in;
843 	struct sk_buff_head temp;
844 	__u16 csid, cssn;
845 
846 	in  = &ulpq->asoc->ssnmap->in;
847 
848 	/* We are holding the chunks by stream, by SSN.  */
849 	skb_queue_head_init(&temp);
850 	event = NULL;
851 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
852 		cevent = (struct sctp_ulpevent *) pos->cb;
853 		csid = cevent->stream;
854 		cssn = cevent->ssn;
855 
856 		/* Have we gone too far?  */
857 		if (csid > sid)
858 			break;
859 
860 		/* Have we not gone far enough?  */
861 		if (csid < sid)
862 			continue;
863 
864 		/* see if this ssn has been marked by skipping */
865 		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
866 			break;
867 
868 		__skb_unlink(pos, &ulpq->lobby);
869 		if (!event)
870 			/* Use the first gathered skb as the event to deliver.  */
871 			event = sctp_skb2event(pos);
872 
873 		/* Attach all gathered skbs to the event.  */
874 		__skb_queue_tail(&temp, pos);
875 	}
876 
877 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
878 	 * very first SKB on the 'temp' list.
879 	 */
880 	if (event) {
881 		/* See if we have more ordered data that we can deliver. */
882 		sctp_ulpq_retrieve_ordered(ulpq, event);
883 		sctp_ulpq_tail_event(ulpq, event);
884 	}
885 }
886 
887 /* Skip over an SSN. This is used during the processing of
888  * a Forward TSN chunk to skip over the abandoned ordered data.
889  */
890 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
891 {
892 	struct sctp_stream *in;
893 
894 	/* Note: The stream ID must be verified before this routine.  */
895 	in  = &ulpq->asoc->ssnmap->in;
896 
897 	/* Is this an old SSN?  If so ignore. */
898 	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
899 		return;
900 
901 	/* Mark that we are no longer expecting this SSN or lower. */
902 	sctp_ssn_skip(in, sid, ssn);
903 
904 	/* Go find any other chunks that were waiting for
905 	 * ordering and deliver them if needed.
906 	 */
907 	sctp_ulpq_reap_ordered(ulpq, sid);
908 	return;
909 }
910 
911 /* Renege 'needed' bytes from the ordering queue. */
912 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
913 {
914 	__u16 freed = 0;
915 	__u32 tsn;
916 	struct sk_buff *skb;
917 	struct sctp_ulpevent *event;
918 	struct sctp_tsnmap *tsnmap;
919 
920 	tsnmap = &ulpq->asoc->peer.tsn_map;
921 
922 	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
923 		freed += skb_headlen(skb);
924 		event = sctp_skb2event(skb);
925 		tsn = event->tsn;
926 
927 		sctp_ulpevent_free(event);
928 		sctp_tsnmap_renege(tsnmap, tsn);
929 		if (freed >= needed)
930 			return freed;
931 	}
932 
933 	return freed;
934 }
935 
936 /* Renege 'needed' bytes from the reassembly queue. */
937 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
938 {
939 	__u16 freed = 0;
940 	__u32 tsn;
941 	struct sk_buff *skb;
942 	struct sctp_ulpevent *event;
943 	struct sctp_tsnmap *tsnmap;
944 
945 	tsnmap = &ulpq->asoc->peer.tsn_map;
946 
947 	/* Walk backwards through the list, reneging the newest TSNs. */
948 	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
949 		freed += skb_headlen(skb);
950 		event = sctp_skb2event(skb);
951 		tsn = event->tsn;
952 
953 		sctp_ulpevent_free(event);
954 		sctp_tsnmap_renege(tsnmap, tsn);
955 		if (freed >= needed)
956 			return freed;
957 	}
958 
959 	return freed;
960 }
961 
962 /* Partially deliver the first message as there is pressure on rwnd. */
963 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
964 				struct sctp_chunk *chunk,
965 				gfp_t gfp)
966 {
967 	struct sctp_ulpevent *event;
968 	struct sctp_association *asoc;
969 	struct sctp_sock *sp;
970 
971 	asoc = ulpq->asoc;
972 	sp = sctp_sk(asoc->base.sk);
973 
974 	/* If the association is already in Partial Delivery mode
975 	 * we have nothing to do.
976 	 */
977 	if (ulpq->pd_mode)
978 		return;
979 
980 	/* If the user enabled the fragment interleave socket option,
981 	 * multiple associations can enter partial delivery.
982 	 * Otherwise, we can only enter partial delivery if the
983 	 * socket is not already in partial delivery mode.
984 	 */
985 	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
986 		/* Is partial delivery possible?  */
987 		event = sctp_ulpq_retrieve_first(ulpq);
988 		/* Send event to the ULP.   */
989 		if (event) {
990 			sctp_ulpq_tail_event(ulpq, event);
991 			sctp_ulpq_set_pd(ulpq);
992 			return;
993 		}
994 	}
995 }
996 
997 /* Renege some packets to make room for an incoming chunk.  */
998 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
999 		      gfp_t gfp)
1000 {
1001 	struct sctp_association *asoc;
1002 	__u16 needed, freed;
1003 
1004 	asoc = ulpq->asoc;
1005 
1006 	if (chunk) {
1007 		needed = ntohs(chunk->chunk_hdr->length);
1008 		needed -= sizeof(sctp_data_chunk_t);
1009 	} else
1010 		needed = SCTP_DEFAULT_MAXWINDOW;
1011 
1012 	freed = 0;
1013 
1014 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1015 		freed = sctp_ulpq_renege_order(ulpq, needed);
1016 		if (freed < needed) {
1017 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1018 		}
1019 	}
1020 	/* If able to free enough room, accept this chunk. */
1021 	if (chunk && (freed >= needed)) {
1022 		__u32 tsn;
1023 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1024 		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
1025 		sctp_ulpq_tail_data(ulpq, chunk, gfp);
1026 
1027 		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
1028 	}
1029 
1030 	sk_stream_mem_reclaim(asoc->base.sk);
1031 	return;
1032 }
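
/* Worked example (editorial): a DATA chunk with chunk_hdr->length = 1216
 * needs 1216 - sizeof(sctp_data_chunk_t) = 1200 bytes of space.  If the
 * socket receive queue is empty, up to 1200 bytes of not-yet-delivered
 * events are dropped from the tail of the lobby and then of the reasm queue,
 * and each dropped TSN is un-marked in the peer's tsn_map so the sender will
 * retransmit it.  Only if enough was freed is the new chunk accepted and
 * queued, followed by an attempt at partial delivery.
 */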
1033 
1034 
1035 
1036 /* Notify the application if an association is aborted and in
1037  * partial delivery mode.  Send up any pending received messages.
1038  */
1039 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1040 {
1041 	struct sctp_ulpevent *ev = NULL;
1042 	struct sock *sk;
1043 
1044 	if (!ulpq->pd_mode)
1045 		return;
1046 
1047 	sk = ulpq->asoc->base.sk;
1048 	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
1049 				       &sctp_sk(sk)->subscribe))
1050 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1051 					      SCTP_PARTIAL_DELIVERY_ABORTED,
1052 					      gfp);
1053 	if (ev)
1054 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1055 
1056 	/* If there is data waiting, send it up the socket now. */
1057 	if (sctp_ulpq_clear_pd(ulpq) || ev)
1058 		sk->sk_data_ready(sk, 0);
1059 }
1060