Lines Matching +full:ulp +full:- +full:0

1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
10 * This abstraction carries sctp events to the ULP (sockets).
14 * lksctp developers <linux-sctp@vger.kernel.org>
40 /* Initialize a ULP queue from a block of memory. */
43 memset(ulpq, 0, sizeof(struct sctp_ulpq)); in sctp_ulpq_init()
45 ulpq->asoc = asoc; in sctp_ulpq_init()
46 skb_queue_head_init(&ulpq->reasm); in sctp_ulpq_init()
47 skb_queue_head_init(&ulpq->reasm_uo); in sctp_ulpq_init()
48 skb_queue_head_init(&ulpq->lobby); in sctp_ulpq_init()
49 ulpq->pd_mode = 0; in sctp_ulpq_init()
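For context, the fields initialized above belong to the ULP queue structure. A simplified sketch, assuming the layout used by recent kernels (include/net/sctp/structs.h); exact members vary by version:

    struct sctp_ulpq {
        char pd_mode;                   /* non-zero while this association is in partial delivery */
        struct sctp_association *asoc;  /* owning association */
        struct sk_buff_head reasm;      /* fragments awaiting reassembly (ordered data) */
        struct sk_buff_head reasm_uo;   /* fragments awaiting reassembly (unordered data) */
        struct sk_buff_head lobby;      /* complete messages waiting for their SSN turn */
    };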
59 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { in sctp_ulpq_flush()
64 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { in sctp_ulpq_flush()
69 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) { in sctp_ulpq_flush()
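The dequeue loops above free every buffered event; the loop bodies (not shown by the matcher) look roughly like this sketch:

    while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
        struct sctp_ulpevent *event = sctp_skb2event(skb);

        sctp_ulpevent_free(event);   /* drop the event and its skb */
    }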
87 int event_eor = 0; in sctp_ulpq_tail_data()
90 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); in sctp_ulpq_tail_data()
92 return -ENOMEM; in sctp_ulpq_tail_data()
94 event->ssn = ntohs(chunk->subh.data_hdr->ssn); in sctp_ulpq_tail_data()
95 event->ppid = chunk->subh.data_hdr->ppid; in sctp_ulpq_tail_data()
106 if (event->msg_flags & MSG_EOR) in sctp_ulpq_tail_data()
110 /* Send event to the ULP. 'event' is the sctp_ulpevent for the very first SKB on the 'temp' list. */ in sctp_ulpq_tail_data()
114 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; in sctp_ulpq_tail_data()
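Taken together, these lines from sctp_ulpq_tail_data() form the receive-side pipeline. A simplified sketch of the flow (details such as the temporary-list bookkeeping differ slightly between kernel versions):

    event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);  /* wrap the DATA chunk in an event */
    event = sctp_ulpq_reasm(ulpq, event);                        /* reassemble fragments if needed  */
    if (event) {
        skb_queue_head_init(&temp);
        __skb_queue_tail(&temp, sctp_event2skb(event));
        if (event->msg_flags & MSG_EOR)                          /* complete message?               */
            event = sctp_ulpq_order(ulpq, event);                /* enforce per-stream SSN order    */
    }
    if (event) {
        event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
        sctp_ulpq_tail_event(ulpq, &temp);                       /* hand the skb list to the socket */
    }
    return event_eor;                                            /* 1 if a full message was delivered */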
121 /* Add a new event for propagation to the ULP. */
129 if (atomic_dec_and_test(&sp->pd_mode)) { in sctp_clear_pd()
133 if (!skb_queue_empty(&sp->pd_lobby)) { in sctp_clear_pd()
134 skb_queue_splice_tail_init(&sp->pd_lobby, in sctp_clear_pd()
135 &sk->sk_receive_queue); in sctp_clear_pd()
144 if (!skb_queue_empty(&sp->pd_lobby) && asoc) { in sctp_clear_pd()
148 sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { in sctp_clear_pd()
150 if (event->asoc == asoc) { in sctp_clear_pd()
151 __skb_unlink(skb, &sp->pd_lobby); in sctp_clear_pd()
152 __skb_queue_tail(&sk->sk_receive_queue, in sctp_clear_pd()
159 return 0; in sctp_clear_pd()
165 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
167 atomic_inc(&sp->pd_mode); in sctp_ulpq_set_pd()
168 ulpq->pd_mode = 1; in sctp_ulpq_set_pd()
174 ulpq->pd_mode = 0; in sctp_ulpq_clear_pd()
176 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
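Partial delivery state is tracked at two levels: ulpq->pd_mode marks the association, while the atomic sp->pd_mode counts how many associations on the socket are in partial delivery, and sp->pd_lobby parks events that must not interleave with the partially delivered message. A condensed sketch of the teardown performed by the two functions above:

    ulpq->pd_mode = 0;                                    /* this association leaves PD */
    if (atomic_dec_and_test(&sp->pd_mode)) {
        /* last association in PD: release everything that was parked */
        if (!skb_queue_empty(&sp->pd_lobby))
            skb_queue_splice_tail_init(&sp->pd_lobby, &sk->sk_receive_queue);
    } else if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
        /* others still in PD: move only this association's events */
        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
            if (sctp_skb2event(skb)->asoc == asoc) {
                __skb_unlink(skb, &sp->pd_lobby);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
            }
        }
    }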
181 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
186 int clear_pd = 0; in sctp_ulpq_tail_event()
194 if (sk->sk_shutdown & RCV_SHUTDOWN && in sctp_ulpq_tail_event()
195 (sk->sk_shutdown & SEND_SHUTDOWN || in sctp_ulpq_tail_event()
204 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) in sctp_ulpq_tail_event()
212 if (atomic_read(&sp->pd_mode) == 0) { in sctp_ulpq_tail_event()
213 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
215 if (ulpq->pd_mode) { in sctp_ulpq_tail_event()
221 if ((event->msg_flags & MSG_NOTIFICATION) || in sctp_ulpq_tail_event()
223 (event->msg_flags & SCTP_DATA_FRAG_MASK))) in sctp_ulpq_tail_event()
224 queue = &sp->pd_lobby; in sctp_ulpq_tail_event()
226 clear_pd = event->msg_flags & MSG_EOR; in sctp_ulpq_tail_event()
227 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
235 if (sp->frag_interleave) in sctp_ulpq_tail_event()
236 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
238 queue = &sp->pd_lobby; in sctp_ulpq_tail_event()
251 if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) { in sctp_ulpq_tail_event()
253 sp->data_ready_signalled = 1; in sctp_ulpq_tail_event()
254 sk->sk_data_ready(sk); in sctp_ulpq_tail_event()
261 return 0; in sctp_ulpq_tail_event()
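The destination queue chosen in sctp_ulpq_tail_event() depends on that partial-delivery state; a condensed sketch of the selection shown above:

    if (atomic_read(&sp->pd_mode) == 0) {
        queue = &sk->sk_receive_queue;              /* no PD on the socket: deliver normally */
    } else if (ulpq->pd_mode) {
        if ((event->msg_flags & MSG_NOTIFICATION) ||
            (SCTP_DATA_NOT_FRAG ==
             (event->msg_flags & SCTP_DATA_FRAG_MASK)))
            queue = &sp->pd_lobby;                  /* don't interleave with the PD message  */
        else {
            clear_pd = event->msg_flags & MSG_EOR;  /* last fragment ends partial delivery   */
            queue = &sk->sk_receive_queue;
        }
    } else if (sp->frag_interleave) {
        queue = &sk->sk_receive_queue;              /* interleaving allowed: deliver anyway   */
    } else {
        queue = &sp->pd_lobby;                      /* park behind another association's PD   */
    }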
274 tsn = event->tsn; in sctp_ulpq_store_reasm()
277 pos = skb_peek_tail(&ulpq->reasm); in sctp_ulpq_store_reasm()
279 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
285 ctsn = cevent->tsn; in sctp_ulpq_store_reasm()
287 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
292 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_store_reasm()
294 ctsn = cevent->tsn; in sctp_ulpq_store_reasm()
301 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
307 * This routine creates a re-assembled skb given the first and last skb's
308 * as stored in the reassembly queue. The skb's may be non-linear if the sctp level rwnd was big enough to accommodate the data, i.e. the payload of an sctp chunk. in sctp_make_reassembled_event()
321 struct sk_buff *list = skb_shinfo(f_frag)->frag_list; in sctp_make_reassembled_event()
327 pos = f_frag->next; in sctp_make_reassembled_event()
330 for (last = list; list; last = list, list = list->next) in sctp_make_reassembled_event()
337 last->next = pos; in sctp_make_reassembled_event()
349 sctp_skb_set_owner_r(new, f_frag->sk); in sctp_make_reassembled_event()
351 skb_shinfo(new)->frag_list = pos; in sctp_make_reassembled_event()
353 skb_shinfo(f_frag)->frag_list = pos; in sctp_make_reassembled_event()
359 /* if we did unshare, then free the old skb and re-assign */ in sctp_make_reassembled_event()
367 pnext = pos->next; in sctp_make_reassembled_event()
370 f_frag->len += pos->len; in sctp_make_reassembled_event()
371 f_frag->data_len += pos->len; in sctp_make_reassembled_event()
379 pos->next = pnext; in sctp_make_reassembled_event()
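These lines belong to the chaining loop of sctp_make_reassembled_event(); in condensed form it accounts every fragment into the head skb and unlinks it from the reassembly queue, roughly:

    while (pos) {
        pnext = pos->next;
        f_frag->len      += pos->len;     /* head skb now covers this fragment      */
        f_frag->data_len += pos->len;
        __skb_unlink(pos, queue);         /* fragment leaves the reassembly queue   */
        if (pos == l_frag)
            break;                        /* stop at the last fragment              */
        pos = pnext;
    }
    return sctp_skb2event(f_frag);        /* the rebuilt event rides the head skb   */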
402 size_t pd_len = 0; in sctp_ulpq_retrieve_reassembled()
406 /* Initialized to 0 just to avoid compiler warning message. Will never be used with this value; it is referenced only after being set when the first fragment of a message is found. */ in sctp_ulpq_retrieve_reassembled()
410 next_tsn = 0; in sctp_ulpq_retrieve_reassembled()
425 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_reassembled()
427 ctsn = cevent->tsn; in sctp_ulpq_retrieve_reassembled()
429 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { in sctp_ulpq_retrieve_reassembled()
435 if (skb_queue_is_first(&ulpq->reasm, pos)) { in sctp_ulpq_retrieve_reassembled()
438 pd_len = pos->len; in sctp_ulpq_retrieve_reassembled()
442 pd_len = 0; in sctp_ulpq_retrieve_reassembled()
454 pd_len += pos->len; in sctp_ulpq_retrieve_reassembled()
469 asoc = ulpq->asoc; in sctp_ulpq_retrieve_reassembled()
476 if (!sctp_sk(asoc->base.sk)->frag_interleave && in sctp_ulpq_retrieve_reassembled()
477 atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) in sctp_ulpq_retrieve_reassembled()
481 pd_point = sctp_sk(asoc->base.sk)->pd_point; in sctp_ulpq_retrieve_reassembled()
483 retval = sctp_make_reassembled_event(asoc->base.net, in sctp_ulpq_retrieve_reassembled()
484 &ulpq->reasm, in sctp_ulpq_retrieve_reassembled()
493 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, in sctp_ulpq_retrieve_reassembled()
494 &ulpq->reasm, first_frag, pos); in sctp_ulpq_retrieve_reassembled()
496 retval->msg_flags |= MSG_EOR; in sctp_ulpq_retrieve_reassembled()
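If no complete message is found, the same walk also tracks how many bytes of leading in-sequence fragments are queued (pd_len above; pd_first/pd_last are the bounding skbs in the actual function). When the socket's partial-delivery point is set and exceeded, that prefix is delivered early and the queue enters partial delivery; a condensed sketch:

    pd_point = sctp_sk(asoc->base.sk)->pd_point;
    if (pd_point && pd_point <= pd_len) {
        retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
                                             pd_first, pd_last);
        if (retval)
            sctp_ulpq_set_pd(ulpq);       /* enter partial delivery mode */
    }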
514 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_partial()
519 next_tsn = 0; in sctp_ulpq_retrieve_partial()
520 is_last = 0; in sctp_ulpq_retrieve_partial()
522 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_partial()
524 ctsn = cevent->tsn; in sctp_ulpq_retrieve_partial()
526 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { in sctp_ulpq_retrieve_partial()
559 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_partial()
562 retval->msg_flags |= MSG_EOR; in sctp_ulpq_retrieve_partial()
577 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { in sctp_ulpq_reasm()
578 event->msg_flags |= MSG_EOR; in sctp_ulpq_reasm()
583 if (!ulpq->pd_mode) in sctp_ulpq_reasm()
591 ctsn = event->tsn; in sctp_ulpq_reasm()
592 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); in sctp_ulpq_reasm()
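sctp_ulpq_reasm() is the dispatcher for the reassembly helpers above: unfragmented chunks pass straight through, otherwise the fragment is stored in TSN order and a complete (or, in partial-delivery mode, partial) message is pulled back out. Condensed sketch:

    if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
        event->msg_flags |= MSG_EOR;                      /* nothing to reassemble          */
        return event;
    }
    sctp_ulpq_store_reasm(ulpq, event);                   /* insert in TSN order            */
    if (!ulpq->pd_mode)
        retval = sctp_ulpq_retrieve_reassembled(ulpq);    /* whole message ready?           */
    else if (TSN_lte(event->tsn,
                     sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map)))
        retval = sctp_ulpq_retrieve_partial(ulpq);        /* feed the partial-delivery reader */
    return retval;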
613 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_first()
618 next_tsn = 0; in sctp_ulpq_retrieve_first()
620 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_first()
622 ctsn = cevent->tsn; in sctp_ulpq_retrieve_first()
624 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { in sctp_ulpq_retrieve_first()
660 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_first()
672 * take cautions in updating its re-assembly queue: per RFC 3758, Section 3.6, the receiver MUST remove any partially reassembled message that is still missing one or more TSNs earlier than or equal to the new cumulative TSN point. in sctp_ulpq_reasm_flushtsn()
685 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_flushtsn()
688 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { in sctp_ulpq_reasm_flushtsn()
690 tsn = event->tsn; in sctp_ulpq_reasm_flushtsn()
698 __skb_unlink(pos, &ulpq->reasm); in sctp_ulpq_reasm_flushtsn()
714 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_drain()
724 if (event->msg_flags & MSG_EOR) in sctp_ulpq_reasm_drain()
727 /* Send event to the ULP. 'event' is the sctp_ulpevent for the very first SKB on the 'temp' list. */ in sctp_ulpq_reasm_drain()
748 sid = event->stream; in sctp_ulpq_retrieve_ordered()
749 stream = &ulpq->asoc->stream; in sctp_ulpq_retrieve_ordered()
751 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; in sctp_ulpq_retrieve_ordered()
754 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { in sctp_ulpq_retrieve_ordered()
755 cevent = (struct sctp_ulpevent *) pos->cb; in sctp_ulpq_retrieve_ordered()
756 csid = cevent->stream; in sctp_ulpq_retrieve_ordered()
757 cssn = cevent->ssn; in sctp_ulpq_retrieve_ordered()
773 __skb_unlink(pos, &ulpq->lobby); in sctp_ulpq_retrieve_ordered()
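Once the expected SSN arrives, this walk releases any consecutive successors parked in the lobby; condensed:

    sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
        cevent = (struct sctp_ulpevent *)pos->cb;
        if (cevent->stream > sid)
            break;                                 /* lobby is sorted by stream id */
        if (cevent->stream < sid)
            continue;
        if (cevent->ssn != sctp_ssn_peek(stream, in, sid))
            break;                                 /* SSN gap: stop releasing      */
        sctp_ssn_next(stream, in, sid);            /* consume this SSN             */
        __skb_unlink(pos, &ulpq->lobby);
        __skb_queue_tail(event_list, pos);         /* deliver right behind 'event' */
    }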
789 pos = skb_peek_tail(&ulpq->lobby); in sctp_ulpq_store_ordered()
791 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
795 sid = event->stream; in sctp_ulpq_store_ordered()
796 ssn = event->ssn; in sctp_ulpq_store_ordered()
798 cevent = (struct sctp_ulpevent *) pos->cb; in sctp_ulpq_store_ordered()
799 csid = cevent->stream; in sctp_ulpq_store_ordered()
800 cssn = cevent->ssn; in sctp_ulpq_store_ordered()
802 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
807 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
814 skb_queue_walk(&ulpq->lobby, pos) { in sctp_ulpq_store_ordered()
815 cevent = (struct sctp_ulpevent *) pos->cb; in sctp_ulpq_store_ordered()
816 csid = cevent->stream; in sctp_ulpq_store_ordered()
817 cssn = cevent->ssn; in sctp_ulpq_store_ordered()
827 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
837 if (event->msg_flags & SCTP_DATA_UNORDERED) in sctp_ulpq_order()
841 sid = event->stream; in sctp_ulpq_order()
842 ssn = event->ssn; in sctp_ulpq_order()
843 stream = &ulpq->asoc->stream; in sctp_ulpq_order()
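The ordering gate itself is small: if the event carries the SSN the stream expects, it is consumed and any parked successors are released via sctp_ulpq_retrieve_ordered(); otherwise it is stored in the lobby. Condensed sketch:

    if (event->msg_flags & SCTP_DATA_UNORDERED)
        return event;                               /* unordered data skips the gate   */
    if (ssn != sctp_ssn_peek(stream, in, sid)) {
        sctp_ulpq_store_ordered(ulpq, event);       /* park it, sorted by stream/SSN   */
        return NULL;
    }
    sctp_ssn_next(stream, in, sid);                 /* expected SSN: consume it        */
    sctp_ulpq_retrieve_ordered(ulpq, event);        /* and release any successors      */
    return event;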
875 struct sk_buff_head *lobby = &ulpq->lobby; in sctp_ulpq_reap_ordered()
878 stream = &ulpq->asoc->stream; in sctp_ulpq_reap_ordered()
884 cevent = (struct sctp_ulpevent *) pos->cb; in sctp_ulpq_reap_ordered()
885 csid = cevent->stream; in sctp_ulpq_reap_ordered()
886 cssn = cevent->ssn; in sctp_ulpq_reap_ordered()
913 cevent = (struct sctp_ulpevent *) pos->cb; in sctp_ulpq_reap_ordered()
914 csid = cevent->stream; in sctp_ulpq_reap_ordered()
915 cssn = cevent->ssn; in sctp_ulpq_reap_ordered()
925 /* Send event to the ULP. 'event' is the sctp_ulpevent for the very first SKB on the 'temp' list. */ in sctp_ulpq_reap_ordered()
943 stream = &ulpq->asoc->stream; in sctp_ulpq_skip()
961 __u16 freed = 0; in sctp_ulpq_renege_list()
967 tsnmap = &ulpq->asoc->peer.tsn_map; in sctp_ulpq_renege_list()
971 tsn = event->tsn; in sctp_ulpq_renege_list()
982 flist = skb_shinfo(skb)->frag_list; in sctp_ulpq_renege_list()
983 for (last = flist; flist; flist = flist->next) { in sctp_ulpq_renege_list()
988 last_tsn = sctp_skb2event(last)->tsn; in sctp_ulpq_renege_list()
1009 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); in sctp_ulpq_renege_order()
1015 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); in sctp_ulpq_renege_frags()
1028 asoc = ulpq->asoc; in sctp_ulpq_partial_delivery()
1029 sp = sctp_sk(asoc->base.sk); in sctp_ulpq_partial_delivery()
1034 if (ulpq->pd_mode) in sctp_ulpq_partial_delivery()
1040 skb = skb_peek(&asoc->ulpq.reasm); in sctp_ulpq_partial_delivery()
1042 ctsn = sctp_skb2event(skb)->tsn; in sctp_ulpq_partial_delivery()
1043 if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map))) in sctp_ulpq_partial_delivery()
1052 if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) { in sctp_ulpq_partial_delivery()
1055 /* Send event to the ULP. */ in sctp_ulpq_partial_delivery()
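When data must be handed up before the whole message has arrived (for example after reneging), partial delivery is started by giving the reader the first run of in-order fragments and flagging the queue; condensed sketch:

    event = sctp_ulpq_retrieve_first(ulpq);        /* leading run of fragments     */
    if (event) {
        skb_queue_head_init(&temp);
        __skb_queue_tail(&temp, sctp_event2skb(event));
        sctp_ulpq_tail_event(ulpq, &temp);         /* hand it to the socket        */
        sctp_ulpq_set_pd(ulpq);                    /* mark partial delivery active */
    }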
1072 struct sctp_association *asoc = ulpq->asoc; in sctp_ulpq_renege()
1073 __u32 freed = 0; in sctp_ulpq_renege()
1076 needed = ntohs(chunk->chunk_hdr->length) - in sctp_ulpq_renege()
1079 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { in sctp_ulpq_renege()
1082 freed += sctp_ulpq_renege_frags(ulpq, needed - freed); in sctp_ulpq_renege()
1085 if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) && in sctp_ulpq_renege()
1092 if (retval <= 0) in sctp_ulpq_renege()
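sctp_ulpq_renege() ties the pieces together: compute how much room the new chunk needs, reclaim it by dropping the newest out-of-order data (ordered lobby first, then fragments), and if enough was freed accept the chunk, falling back to partial delivery or draining the reassembly queue. Condensed sketch:

    needed = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_data_chunk);
    if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
        freed = sctp_ulpq_renege_order(ulpq, needed);                 /* drop ordered events first   */
        if (freed < needed)
            freed += sctp_ulpq_renege_frags(ulpq, needed - freed);    /* then stored fragments       */
    }
    if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) && freed >= needed) {
        int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
        if (retval <= 0)
            sctp_ulpq_partial_delivery(ulpq, gfp);    /* no full message: start partial delivery   */
        else if (retval == 1)
            sctp_ulpq_reasm_drain(ulpq);              /* full message delivered: drain reassembly  */
    }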
1108 if (!ulpq->pd_mode) in sctp_ulpq_abort_pd()
1111 sk = ulpq->asoc->base.sk; in sctp_ulpq_abort_pd()
1113 if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe, in sctp_ulpq_abort_pd()
1115 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, in sctp_ulpq_abort_pd()
1117 0, 0, 0, gfp); in sctp_ulpq_abort_pd()
1119 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); in sctp_ulpq_abort_pd()
1122 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) { in sctp_ulpq_abort_pd()
1123 sp->data_ready_signalled = 1; in sctp_ulpq_abort_pd()
1124 sk->sk_data_ready(sk); in sctp_ulpq_abort_pd()