xref: /openbmc/linux/net/sctp/inqueue.c (revision 80483c3a)
1 /* SCTP kernel implementation
2  * Copyright (c) 1999-2000 Cisco, Inc.
3  * Copyright (c) 1999-2001 Motorola, Inc.
4  * Copyright (c) 2002 International Business Machines, Corp.
5  *
6  * This file is part of the SCTP kernel implementation
7  *
8  * These functions are the methods for accessing the SCTP inqueue.
9  *
10  * An SCTP inqueue is a queue into which you push SCTP packets
11  * (which might be bundles or fragments of chunks) and out of which you
12  * pop SCTP whole chunks.
13  *
14  * This SCTP implementation is free software;
15  * you can redistribute it and/or modify it under the terms of
16  * the GNU General Public License as published by
17  * the Free Software Foundation; either version 2, or (at your option)
18  * any later version.
19  *
20  * This SCTP implementation is distributed in the hope that it
21  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
22  *                 ************************
23  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
24  * See the GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with GNU CC; see the file COPYING.  If not, see
28  * <http://www.gnu.org/licenses/>.
29  *
30  * Please send any bug reports or fixes you make to the
31  * email address(es):
32  *    lksctp developers <linux-sctp@vger.kernel.org>
33  *
34  * Written or modified by:
35  *    La Monte H.P. Yarroll <piggy@acm.org>
36  *    Karl Knutson <karl@athena.chicago.il.us>
37  */
38 
39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 
41 #include <net/sctp/sctp.h>
42 #include <net/sctp/sm.h>
43 #include <linux/interrupt.h>
44 #include <linux/slab.h>
45 
46 /* Initialize an SCTP inqueue.  */
47 void sctp_inq_init(struct sctp_inq *queue)
48 {
49 	INIT_LIST_HEAD(&queue->in_chunk_list);
50 	queue->in_progress = NULL;
51 
52 	/* Create a task for delivering data.  */
53 	INIT_WORK(&queue->immediate, NULL);
54 }
55 
56 /* Release the memory associated with an SCTP inqueue.  */
57 void sctp_inq_free(struct sctp_inq *queue)
58 {
59 	struct sctp_chunk *chunk, *tmp;
60 
61 	/* Empty the queue.  */
62 	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
63 		list_del_init(&chunk->list);
64 		sctp_chunk_free(chunk);
65 	}
66 
67 	/* If there is a packet which is currently being worked on,
68 	 * free it as well.
69 	 */
70 	if (queue->in_progress) {
71 		sctp_chunk_free(queue->in_progress);
72 		queue->in_progress = NULL;
73 	}
74 }
75 
76 /* Put a new packet in an SCTP inqueue.
77  * We assume that packet->sctp_hdr is set and in host byte order.
78  */
79 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
80 {
81 	/* Directly call the packet handling routine. */
82 	if (chunk->rcvr->dead) {
83 		sctp_chunk_free(chunk);
84 		return;
85 	}
86 
87 	/* We are now calling this either from the soft interrupt
88 	 * or from the backlog processing.
89 	 * Eventually, we should clean up inqueue to not rely
90 	 * on the BH related data structures.
91 	 */
92 	list_add_tail(&chunk->list, &q->in_chunk_list);
93 	if (chunk->asoc)
94 		chunk->asoc->stats.ipackets++;
95 	q->immediate.func(&q->immediate);
96 }
97 
98 /* Peek at the next chunk on the inqeue. */
99 struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
100 {
101 	struct sctp_chunk *chunk;
102 	sctp_chunkhdr_t *ch = NULL;
103 
104 	chunk = queue->in_progress;
105 	/* If there is no more chunks in this packet, say so */
106 	if (chunk->singleton ||
107 	    chunk->end_of_packet ||
108 	    chunk->pdiscard)
109 		    return NULL;
110 
111 	ch = (sctp_chunkhdr_t *)chunk->chunk_end;
112 
113 	return ch;
114 }
115 

/* Extract a chunk from an SCTP inqueue.
 *
 * Returns the next whole chunk of the packet in progress, pulling a
 * new packet off in_chunk_list when needed, or NULL when the queue
 * is empty.
 *
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */

	chunk = queue->in_progress;
	if (chunk) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
			/* For GSO packets the per-segment skbs live on
			 * head_skb's frag_list (set up below); advance
			 * to the next segment before giving up the chunk.
			 */
			if (chunk->head_skb == chunk->skb) {
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
				goto new_skb;
			}
			if (chunk->skb->next) {
				chunk->skb = chunk->skb->next;
				goto new_skb;
			}

			/* No segments left: point skb back at the head
			 * so the whole packet is freed, then drop it.
			 */
			if (chunk->head_skb)
				chunk->skb = chunk->head_skb;
			sctp_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
			/* Force chunk->skb->data to chunk->chunk_end.  */
			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
			/* We are guaranteed to pull a SCTP header. */
		}
	}

	/* Do we need to take the next packet out of the queue to process? */
	if (!chunk) {
		struct list_head *entry;

next_chunk:
		/* Is the queue empty?  */
		entry = sctp_list_dequeue(&queue->in_chunk_list);
		if (!entry)
			return NULL;

		chunk = list_entry(entry, struct sctp_chunk, list);

		/* Linearize if it's not GSO */
		if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
		    skb_is_nonlinear(chunk->skb)) {
			/* Linearization failure: count the discard and
			 * move on to the next queued packet.
			 */
			if (skb_linearize(chunk->skb)) {
				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}

			/* Update sctp_hdr as it probably changed */
			chunk->sctp_hdr = sctp_hdr(chunk->skb);
		}

		if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
			/* GSO-marked skbs but without frags, handle
			 * them normally
			 */
			if (skb_shinfo(chunk->skb)->frag_list)
				chunk->head_skb = chunk->skb;

			/* skbs with "cover letter": the head holds no
			 * data of its own, so start at the first segment.
			 */
			if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;

			if (WARN_ON(!chunk->skb)) {
				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}
		}

		if (chunk->asoc)
			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

		queue->in_progress = chunk;

new_skb:
		/* This is the first chunk in the packet.  */
		ch = (sctp_chunkhdr_t *) chunk->skb->data;
		/* Reset per-chunk parse state; the checks at the bottom
		 * of this function refine singleton/pdiscard/end_of_packet.
		 */
		chunk->singleton = 1;
		chunk->data_accepted = 0;
		chunk->pdiscard = 0;
		chunk->auth = 0;
		chunk->has_asconf = 0;
		chunk->end_of_packet = 0;
		if (chunk->head_skb) {
			/* Propagate the control-block state recorded on the
			 * head skb to the segment we are about to parse.
			 */
			struct sctp_input_cb
				*cb = SCTP_INPUT_CB(chunk->skb),
				*head_cb = SCTP_INPUT_CB(chunk->head_skb);

			cb->chunk = head_cb->chunk;
			cb->af = head_cb->af;
		}
	}

	chunk->chunk_hdr = ch;
	/* chunk_end is the header length rounded up to a word boundary. */
	chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
	chunk->subh.v = NULL; /* Subheader is no longer valid.  */

	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
	    skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
		/* Discard inside state machine.  The chunk claims more
		 * data than the skb holds; clamp chunk_end so we never
		 * read past the buffer.
		 */
		chunk->pdiscard = 1;
		chunk->chunk_end = skb_tail_pointer(chunk->skb);
	} else {
		/* We are at the end of the packet, so mark the chunk
		 * in case we need to send a SACK.
		 */
		chunk->end_of_packet = 1;
	}

	pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
		 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

	return chunk;
}
254 
/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}
267