xref: /openbmc/linux/net/sctp/endpointola.c (revision 4e27428f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001-2002 International Business Machines, Corp.
6  * Copyright (c) 2001 Intel Corp.
7  * Copyright (c) 2001 Nokia, Inc.
8  * Copyright (c) 2001 La Monte H.P. Yarroll
9  *
10  * This file is part of the SCTP kernel implementation
11  *
12  * This abstraction represents an SCTP endpoint.
13  *
14  * Please send any bug reports or fixes you make to the
15  * email address(es):
16  *    lksctp developers <linux-sctp@vger.kernel.org>
17  *
18  * Written or modified by:
19  *    La Monte H.P. Yarroll <piggy@acm.org>
20  *    Karl Knutson <karl@athena.chicago.il.us>
21  *    Jon Grimm <jgrimm@austin.ibm.com>
22  *    Daisy Chang <daisyc@us.ibm.com>
23  *    Dajiang Zhang <dajiang.zhang@nokia.com>
24  */
25 
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/random.h>	/* get_random_bytes() */
#include <linux/string.h>	/* memzero_explicit() */
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
34 
35 /* Forward declarations for internal helpers. */
36 static void sctp_endpoint_bh_rcv(struct work_struct *work);
37 
38 /*
39  * Initialize the base fields of the endpoint structure.
40  */
41 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
42 						struct sock *sk,
43 						gfp_t gfp)
44 {
45 	struct net *net = sock_net(sk);
46 	struct sctp_hmac_algo_param *auth_hmacs = NULL;
47 	struct sctp_chunks_param *auth_chunks = NULL;
48 	struct sctp_shared_key *null_key;
49 	int err;
50 
51 	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
52 	if (!ep->digest)
53 		return NULL;
54 
55 	ep->asconf_enable = net->sctp.addip_enable;
56 	ep->auth_enable = net->sctp.auth_enable;
57 	if (ep->auth_enable) {
58 		/* Allocate space for HMACS and CHUNKS authentication
59 		 * variables.  There are arrays that we encode directly
60 		 * into parameters to make the rest of the operations easier.
61 		 */
62 		auth_hmacs = kzalloc(struct_size(auth_hmacs, hmac_ids,
63 						 SCTP_AUTH_NUM_HMACS), gfp);
64 		if (!auth_hmacs)
65 			goto nomem;
66 
67 		auth_chunks = kzalloc(sizeof(*auth_chunks) +
68 				      SCTP_NUM_CHUNK_TYPES, gfp);
69 		if (!auth_chunks)
70 			goto nomem;
71 
72 		/* Initialize the HMACS parameter.
73 		 * SCTP-AUTH: Section 3.3
74 		 *    Every endpoint supporting SCTP chunk authentication MUST
75 		 *    support the HMAC based on the SHA-1 algorithm.
76 		 */
77 		auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO;
78 		auth_hmacs->param_hdr.length =
79 					htons(sizeof(struct sctp_paramhdr) + 2);
80 		auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1);
81 
82 		/* Initialize the CHUNKS parameter */
83 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
84 		auth_chunks->param_hdr.length =
85 					htons(sizeof(struct sctp_paramhdr));
86 
87 		/* If the Add-IP functionality is enabled, we must
88 		 * authenticate, ASCONF and ASCONF-ACK chunks
89 		 */
90 		if (ep->asconf_enable) {
91 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
92 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
93 			auth_chunks->param_hdr.length =
94 					htons(sizeof(struct sctp_paramhdr) + 2);
95 		}
96 
97 		/* Allocate and initialize transorms arrays for supported
98 		 * HMACs.
99 		 */
100 		err = sctp_auth_init_hmacs(ep, gfp);
101 		if (err)
102 			goto nomem;
103 	}
104 
105 	/* Initialize the base structure. */
106 	/* What type of endpoint are we?  */
107 	ep->base.type = SCTP_EP_TYPE_SOCKET;
108 
109 	/* Initialize the basic object fields. */
110 	refcount_set(&ep->base.refcnt, 1);
111 	ep->base.dead = false;
112 
113 	/* Create an input queue.  */
114 	sctp_inq_init(&ep->base.inqueue);
115 
116 	/* Set its top-half handler */
117 	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
118 
119 	/* Initialize the bind addr area */
120 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
121 
122 	/* Create the lists of associations.  */
123 	INIT_LIST_HEAD(&ep->asocs);
124 
125 	/* Use SCTP specific send buffer space queues.  */
126 	ep->sndbuf_policy = net->sctp.sndbuf_policy;
127 
128 	sk->sk_data_ready = sctp_data_ready;
129 	sk->sk_write_space = sctp_write_space;
130 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
131 
132 	/* Get the receive buffer policy for this endpoint */
133 	ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
134 
135 	/* Initialize the secret key used with cookie. */
136 	get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
137 
138 	/* SCTP-AUTH extensions*/
139 	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
140 	null_key = sctp_auth_shkey_create(0, gfp);
141 	if (!null_key)
142 		goto nomem_shkey;
143 
144 	list_add(&null_key->key_list, &ep->endpoint_shared_keys);
145 
146 	/* Add the null key to the endpoint shared keys list and
147 	 * set the hmcas and chunks pointers.
148 	 */
149 	ep->auth_hmacs_list = auth_hmacs;
150 	ep->auth_chunk_list = auth_chunks;
151 	ep->prsctp_enable = net->sctp.prsctp_enable;
152 	ep->reconf_enable = net->sctp.reconf_enable;
153 
154 	/* Remember who we are attached to.  */
155 	ep->base.sk = sk;
156 	sock_hold(ep->base.sk);
157 
158 	return ep;
159 
160 nomem_shkey:
161 	sctp_auth_destroy_hmacs(ep->auth_hmacs);
162 nomem:
163 	/* Free all allocations */
164 	kfree(auth_hmacs);
165 	kfree(auth_chunks);
166 	kfree(ep->digest);
167 	return NULL;
168 
169 }
170 
171 /* Create a sctp_endpoint with all that boring stuff initialized.
172  * Returns NULL if there isn't enough memory.
173  */
174 struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
175 {
176 	struct sctp_endpoint *ep;
177 
178 	/* Build a local endpoint. */
179 	ep = kzalloc(sizeof(*ep), gfp);
180 	if (!ep)
181 		goto fail;
182 
183 	if (!sctp_endpoint_init(ep, sk, gfp))
184 		goto fail_init;
185 
186 	SCTP_DBG_OBJCNT_INC(ep);
187 	return ep;
188 
189 fail_init:
190 	kfree(ep);
191 fail:
192 	return NULL;
193 }
194 
195 /* Add an association to an endpoint.  */
196 void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
197 			    struct sctp_association *asoc)
198 {
199 	struct sock *sk = ep->base.sk;
200 
201 	/* If this is a temporary association, don't bother
202 	 * since we'll be removing it shortly and don't
203 	 * want anyone to find it anyway.
204 	 */
205 	if (asoc->temp)
206 		return;
207 
208 	/* Now just add it to our list of asocs */
209 	list_add_tail(&asoc->asocs, &ep->asocs);
210 
211 	/* Increment the backlog value for a TCP-style listening socket. */
212 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
213 		sk->sk_ack_backlog++;
214 }
215 
216 /* Free the endpoint structure.  Delay cleanup until
217  * all users have released their reference count on this structure.
218  */
219 void sctp_endpoint_free(struct sctp_endpoint *ep)
220 {
221 	ep->base.dead = true;
222 
223 	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
224 
225 	/* Unlink this endpoint, so we can't find it again! */
226 	sctp_unhash_endpoint(ep);
227 
228 	sctp_endpoint_put(ep);
229 }
230 
231 /* Final destructor for endpoint.  */
232 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
233 {
234 	struct sock *sk;
235 
236 	if (unlikely(!ep->base.dead)) {
237 		WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
238 		return;
239 	}
240 
241 	/* Free the digest buffer */
242 	kfree(ep->digest);
243 
244 	/* SCTP-AUTH: Free up AUTH releated data such as shared keys
245 	 * chunks and hmacs arrays that were allocated
246 	 */
247 	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
248 	kfree(ep->auth_hmacs_list);
249 	kfree(ep->auth_chunk_list);
250 
251 	/* AUTH - Free any allocated HMAC transform containers */
252 	sctp_auth_destroy_hmacs(ep->auth_hmacs);
253 
254 	/* Cleanup. */
255 	sctp_inq_free(&ep->base.inqueue);
256 	sctp_bind_addr_free(&ep->base.bind_addr);
257 
258 	memset(ep->secret_key, 0, sizeof(ep->secret_key));
259 
260 	sk = ep->base.sk;
261 	/* Remove and free the port */
262 	if (sctp_sk(sk)->bind_hash)
263 		sctp_put_port(sk);
264 
265 	sctp_sk(sk)->ep = NULL;
266 	/* Give up our hold on the sock */
267 	sock_put(sk);
268 
269 	kfree(ep);
270 	SCTP_DBG_OBJCNT_DEC(ep);
271 }
272 
273 /* Hold a reference to an endpoint. */
274 void sctp_endpoint_hold(struct sctp_endpoint *ep)
275 {
276 	refcount_inc(&ep->base.refcnt);
277 }
278 
279 /* Release a reference to an endpoint and clean up if there are
280  * no more references.
281  */
282 void sctp_endpoint_put(struct sctp_endpoint *ep)
283 {
284 	if (refcount_dec_and_test(&ep->base.refcnt))
285 		sctp_endpoint_destroy(ep);
286 }
287 
288 /* Is this the endpoint we are looking for?  */
289 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
290 					       struct net *net,
291 					       const union sctp_addr *laddr)
292 {
293 	struct sctp_endpoint *retval = NULL;
294 
295 	if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
296 	    net_eq(sock_net(ep->base.sk), net)) {
297 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
298 					 sctp_sk(ep->base.sk)))
299 			retval = ep;
300 	}
301 
302 	return retval;
303 }
304 
305 /* Find the association that goes with this chunk.
306  * We lookup the transport from hashtable at first, then get association
307  * through t->assoc.
308  */
309 struct sctp_association *sctp_endpoint_lookup_assoc(
310 	const struct sctp_endpoint *ep,
311 	const union sctp_addr *paddr,
312 	struct sctp_transport **transport)
313 {
314 	struct sctp_association *asoc = NULL;
315 	struct sctp_transport *t;
316 
317 	*transport = NULL;
318 
319 	/* If the local port is not set, there can't be any associations
320 	 * on this endpoint.
321 	 */
322 	if (!ep->base.bind_addr.port)
323 		return NULL;
324 
325 	rcu_read_lock();
326 	t = sctp_epaddr_lookup_transport(ep, paddr);
327 	if (!t)
328 		goto out;
329 
330 	*transport = t;
331 	asoc = t->asoc;
332 out:
333 	rcu_read_unlock();
334 	return asoc;
335 }
336 
337 /* Look for any peeled off association from the endpoint that matches the
338  * given peer address.
339  */
340 bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
341 				 const union sctp_addr *paddr)
342 {
343 	struct sctp_sockaddr_entry *addr;
344 	struct sctp_bind_addr *bp;
345 	struct net *net = sock_net(ep->base.sk);
346 
347 	bp = &ep->base.bind_addr;
348 	/* This function is called with the socket lock held,
349 	 * so the address_list can not change.
350 	 */
351 	list_for_each_entry(addr, &bp->address_list, list) {
352 		if (sctp_has_association(net, &addr->a, paddr))
353 			return true;
354 	}
355 
356 	return false;
357 }
358 
359 /* Do delayed input processing.  This is scheduled by sctp_rcv().
360  * This may be called on BH or task time.
361  */
362 static void sctp_endpoint_bh_rcv(struct work_struct *work)
363 {
364 	struct sctp_endpoint *ep =
365 		container_of(work, struct sctp_endpoint,
366 			     base.inqueue.immediate);
367 	struct sctp_association *asoc;
368 	struct sock *sk;
369 	struct net *net;
370 	struct sctp_transport *transport;
371 	struct sctp_chunk *chunk;
372 	struct sctp_inq *inqueue;
373 	union sctp_subtype subtype;
374 	enum sctp_state state;
375 	int error = 0;
376 	int first_time = 1;	/* is this the first time through the loop */
377 
378 	if (ep->base.dead)
379 		return;
380 
381 	asoc = NULL;
382 	inqueue = &ep->base.inqueue;
383 	sk = ep->base.sk;
384 	net = sock_net(sk);
385 
386 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
387 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
388 
389 		/* If the first chunk in the packet is AUTH, do special
390 		 * processing specified in Section 6.3 of SCTP-AUTH spec
391 		 */
392 		if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
393 			struct sctp_chunkhdr *next_hdr;
394 
395 			next_hdr = sctp_inq_peek(inqueue);
396 			if (!next_hdr)
397 				goto normal;
398 
399 			/* If the next chunk is COOKIE-ECHO, skip the AUTH
400 			 * chunk while saving a pointer to it so we can do
401 			 * Authentication later (during cookie-echo
402 			 * processing).
403 			 */
404 			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
405 				chunk->auth_chunk = skb_clone(chunk->skb,
406 								GFP_ATOMIC);
407 				chunk->auth = 1;
408 				continue;
409 			}
410 		}
411 normal:
412 		/* We might have grown an association since last we
413 		 * looked, so try again.
414 		 *
415 		 * This happens when we've just processed our
416 		 * COOKIE-ECHO chunk.
417 		 */
418 		if (NULL == chunk->asoc) {
419 			asoc = sctp_endpoint_lookup_assoc(ep,
420 							  sctp_source(chunk),
421 							  &transport);
422 			chunk->asoc = asoc;
423 			chunk->transport = transport;
424 		}
425 
426 		state = asoc ? asoc->state : SCTP_STATE_CLOSED;
427 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
428 			continue;
429 
430 		/* Remember where the last DATA chunk came from so we
431 		 * know where to send the SACK.
432 		 */
433 		if (asoc && sctp_chunk_is_data(chunk))
434 			asoc->peer.last_data_from = chunk->transport;
435 		else {
436 			SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
437 			if (asoc)
438 				asoc->stats.ictrlchunks++;
439 		}
440 
441 		if (chunk->transport)
442 			chunk->transport->last_time_heard = ktime_get();
443 
444 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
445 				   ep, asoc, chunk, GFP_ATOMIC);
446 
447 		if (error && chunk)
448 			chunk->pdiscard = 1;
449 
450 		/* Check to see if the endpoint is freed in response to
451 		 * the incoming chunk. If so, get out of the while loop.
452 		 */
453 		if (!sctp_sk(sk)->ep)
454 			break;
455 
456 		if (first_time)
457 			first_time = 0;
458 	}
459 }
460