xref: /openbmc/linux/net/sctp/inqueue.c (revision 47505b8b)
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Initialize an SCTP inqueue.  */
void sctp_inq_init(struct sctp_inq *queue)
{
	INIT_LIST_HEAD(&queue->in_chunk_list);
	queue->in_progress = NULL;

	/* Create a task for delivering data.  */
	INIT_WORK(&queue->immediate, NULL);
}

/* Release the memory associated with an SCTP inqueue.  */
void sctp_inq_free(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk, *tmp;

	/* Empty the queue.  */
	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}

	/* If there is a packet which is currently being worked on,
	 * free it as well.
	 */
	if (queue->in_progress) {
		sctp_chunk_free(queue->in_progress);
		queue->in_progress = NULL;
	}
}
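
/* Illustrative only, not part of the original file: a minimal sketch of
 * the inqueue life cycle described in the header comment, assuming a
 * hypothetical owner structure.  In the kernel the inqueue is embedded
 * in a larger object and initialized and freed alongside it.
 */
struct sctp_inq_example_owner {
	struct sctp_inq inqueue;	/* packets in, whole chunks out */
};

static void sctp_inq_example_setup(struct sctp_inq_example_owner *owner)
{
	/* Start with an empty packet list and no packet in progress. */
	sctp_inq_init(&owner->inqueue);
}

static void sctp_inq_example_teardown(struct sctp_inq_example_owner *owner)
{
	/* Drop any queued packets and the one currently being parsed. */
	sctp_inq_free(&owner->inqueue);
}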

/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
	/* Directly call the packet handling routine. */
	if (chunk->rcvr->dead) {
		sctp_chunk_free(chunk);
		return;
	}

	/* We are now calling this either from the soft interrupt
	 * or from the backlog processing.
	 * Eventually, we should clean up inqueue to not rely
	 * on the BH related data structures.
	 */
	list_add_tail(&chunk->list, &q->in_chunk_list);
	if (chunk->asoc)
		chunk->asoc->stats.ipackets++;
	q->immediate.func(&q->immediate);
}
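
/* Illustrative only, not part of the original file: a hedged sketch of a
 * receive-side caller, with a hypothetical function name.  The real
 * callers build the chunk from a received skb, set chunk->rcvr, and then
 * push; note that the push above runs the registered handler
 * synchronously, so the caller must already be lock safe.
 */
static void sctp_inq_example_deliver(struct sctp_ep_common *rcvr,
				     struct sctp_chunk *chunk)
{
	/* Tag the chunk with its receiver so a dead endpoint or
	 * association is caught by the check above, then queue it.
	 */
	chunk->rcvr = rcvr;
	sctp_inq_push(&rcvr->inqueue, chunk);
}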

/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	struct sctp_chunkhdr *ch = NULL;

	chunk = queue->in_progress;
	/* If there are no more chunks in this packet, say so */
	if (chunk->singleton ||
	    chunk->end_of_packet ||
	    chunk->pdiscard)
		return NULL;

	ch = (struct sctp_chunkhdr *)chunk->chunk_end;

	return ch;
}
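
/* Illustrative only, not part of the original file: a hedged sketch of
 * using the peek above to check the type of the next chunk bundled in
 * the packet currently being processed.  The helper name is
 * hypothetical, and it must only be called while a packet is in
 * progress, since the peek dereferences queue->in_progress.
 */
static bool sctp_inq_example_next_is(struct sctp_inq *queue, __u8 type)
{
	struct sctp_chunkhdr *ch = sctp_inq_peek(queue);

	/* NULL means the current chunk is the last one in its packet. */
	return ch && ch->type == type;
}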


/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	struct sctp_chunkhdr *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */

	chunk = queue->in_progress;
	if (chunk) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
			if (chunk->head_skb == chunk->skb) {
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
				goto new_skb;
			}
			if (chunk->skb->next) {
				chunk->skb = chunk->skb->next;
				goto new_skb;
			}

			if (chunk->head_skb)
				chunk->skb = chunk->head_skb;
			sctp_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
			ch = (struct sctp_chunkhdr *)chunk->chunk_end;
			/* Force chunk->skb->data to chunk->chunk_end.  */
			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
			/* We are guaranteed to pull an SCTP header. */
		}
	}

	/* Do we need to take the next packet out of the queue to process? */
	if (!chunk) {
		struct list_head *entry;

next_chunk:
		/* Is the queue empty?  */
		entry = sctp_list_dequeue(&queue->in_chunk_list);
		if (!entry)
			return NULL;

		chunk = list_entry(entry, struct sctp_chunk, list);

		if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
			/* GSO-marked skbs without frags are handled
			 * normally, so only track a head skb when a
			 * frag_list is present.
			 */
			if (skb_shinfo(chunk->skb)->frag_list)
				chunk->head_skb = chunk->skb;

			/* skbs with "cover letter" */
			if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;

			if (WARN_ON(!chunk->skb)) {
				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}
		}

		if (chunk->asoc)
			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

		queue->in_progress = chunk;

new_skb:
		/* This is the first chunk in the packet.  */
		ch = (struct sctp_chunkhdr *)chunk->skb->data;
		chunk->singleton = 1;
		chunk->data_accepted = 0;
		chunk->pdiscard = 0;
		chunk->auth = 0;
		chunk->has_asconf = 0;
		chunk->end_of_packet = 0;
		if (chunk->head_skb) {
			struct sctp_input_cb
				*cb = SCTP_INPUT_CB(chunk->skb),
				*head_cb = SCTP_INPUT_CB(chunk->head_skb);

			cb->chunk = head_cb->chunk;
			cb->af = head_cb->af;
		}
	}

	chunk->chunk_hdr = ch;
	chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
	skb_pull(chunk->skb, sizeof(*ch));
	chunk->subh.v = NULL; /* Subheader is no longer valid.  */

	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
		/* Discard inside state machine. */
		chunk->pdiscard = 1;
		chunk->chunk_end = skb_tail_pointer(chunk->skb);
	} else {
		/* We are at the end of the packet, so mark the chunk
		 * in case we need to send a SACK.
		 */
		chunk->end_of_packet = 1;
	}

	pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
		 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

	return chunk;
}
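
/* Illustrative only, not part of the original file: a hedged sketch of
 * the pop loop a top-half handler typically runs.  The handler name is
 * hypothetical; the real handlers feed each popped chunk to the SCTP
 * state machine.
 */
static void sctp_inq_example_handler(struct work_struct *work)
{
	struct sctp_inq *q = container_of(work, struct sctp_inq, immediate);
	struct sctp_chunk *chunk;

	/* Pop whole chunks until both the packet in progress and the
	 * queued packets are exhausted; sctp_inq_pop() then returns NULL.
	 */
	while ((chunk = sctp_inq_pop(q)) != NULL) {
		pr_debug("%s: popped chunk type %d\n", __func__,
			 chunk->chunk_hdr->type);
		/* ... hand the chunk to the state machine here ... */
	}
}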

/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}
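
/* Illustrative only, not part of the original file: a hedged sketch of
 * wiring an inqueue to a handler such as the pop-loop sketch above.  The
 * function name is hypothetical; in the kernel this pairing happens when
 * the owning endpoint or association is created.
 */
static void sctp_inq_example_wire_up(struct sctp_inq *q)
{
	sctp_inq_init(q);
	/* From now on, every sctp_inq_push() on this inqueue invokes the
	 * handler synchronously to drain the queued chunks.
	 */
	sctp_inq_set_th_handler(q, sctp_inq_example_handler);
}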