xref: /openbmc/linux/net/sctp/associola.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 La Monte H.P. Yarroll
7  *
8  * This file is part of the SCTP kernel implementation
9  *
10  * This module provides the abstraction for an SCTP association.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Jon Grimm             <jgrimm@us.ibm.com>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang             <hui.huang@nokia.com>
38  *    Sridhar Samudrala	    <sri@us.ibm.com>
39  *    Daisy Chang	    <daisyc@us.ibm.com>
40  *    Ryan Layer	    <rmlayer@us.ibm.com>
41  *    Kevin Gao             <kevin.gao@intel.com>
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/types.h>
47 #include <linux/fcntl.h>
48 #include <linux/poll.h>
49 #include <linux/init.h>
50 
51 #include <linux/slab.h>
52 #include <linux/in.h>
53 #include <net/ipv6.h>
54 #include <net/sctp/sctp.h>
55 #include <net/sctp/sm.h>
56 
57 /* Forward declarations for internal functions. */
58 static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59 static void sctp_assoc_bh_rcv(struct work_struct *work);
60 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62 
63 /* 1st Level Abstractions. */
64 
65 /* Initialize a new association from provided memory. */
66 static struct sctp_association *sctp_association_init(
67 					struct sctp_association *asoc,
68 					const struct sctp_endpoint *ep,
69 					const struct sock *sk,
70 					enum sctp_scope scope, gfp_t gfp)
71 {
72 	struct net *net = sock_net(sk);
73 	struct sctp_sock *sp;
74 	struct sctp_paramhdr *p;
75 	int i;
76 
77 	/* Retrieve the SCTP per socket area.  */
78 	sp = sctp_sk((struct sock *)sk);
79 
80 	/* Discarding const is appropriate here.  */
81 	asoc->ep = (struct sctp_endpoint *)ep;
82 	asoc->base.sk = (struct sock *)sk;
83 
84 	sctp_endpoint_hold(asoc->ep);
85 	sock_hold(asoc->base.sk);
86 
87 	/* Initialize the common base substructure.  */
88 	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
89 
90 	/* Initialize the object handling fields.  */
91 	refcount_set(&asoc->base.refcnt, 1);
92 
93 	/* Initialize the bind addr area.  */
94 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
95 
96 	asoc->state = SCTP_STATE_CLOSED;
97 	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
98 	asoc->user_frag = sp->user_frag;
99 
100 	/* Set the association max_retrans and RTO values from the
101 	 * socket values.
102 	 */
103 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
104 	asoc->pf_retrans  = net->sctp.pf_retrans;
105 
106 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
107 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
108 	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
109 
110 	/* Initialize the association's heartbeat interval based on the
111 	 * sock configured value.
112 	 */
113 	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
114 
115 	/* Initialize path max retrans value. */
116 	asoc->pathmaxrxt = sp->pathmaxrxt;
117 
118 	asoc->flowlabel = sp->flowlabel;
119 	asoc->dscp = sp->dscp;
120 
121 	/* Set association default SACK delay */
122 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
123 	asoc->sackfreq = sp->sackfreq;
124 
125 	/* Set the association default flags controlling
126 	 * Heartbeat, SACK delay, and Path MTU Discovery.
127 	 */
128 	asoc->param_flags = sp->param_flags;
129 
130 	/* Initialize the maximum number of new data packets that can be sent
131 	 * in a burst.
132 	 */
133 	asoc->max_burst = sp->max_burst;
134 
135 	/* initialize association timers */
136 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
137 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
138 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
139 
140 	/* sctpimpguide Section 2.12.2
141 	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
142 	 * recommended value of 5 times 'RTO.Max'.
143 	 */
144 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
145 		= 5 * asoc->rto_max;
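	/* With the default RTO.Max of 60 seconds (a typical srto_max
	 * setting; an assumption about the socket's configuration) this
	 * yields a 300 second guard timer.
	 */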
146 
147 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
148 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
149 
150 	/* Initializes the timers */
151 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
152 		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);
153 
154 	/* Pull default initialization values from the sock options.
155 	 * Note: This assumes that the values have already been
156 	 * validated in the sock.
157 	 */
158 	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
159 	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
160 	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
161 
162 	asoc->max_init_timeo =
163 		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
164 
165 	/* Set the local window size for receive.
166 	 * This is also the rcvbuf space per association.
167 	 * RFC 4960 - An SCTP receiver MUST be able to receive a minimum
168 	 * of 1500 bytes in one SCTP packet.
169 	 */
170 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
171 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
172 	else
173 		asoc->rwnd = sk->sk_rcvbuf/2;
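	/* Equivalently: rwnd = max(sk->sk_rcvbuf / 2, SCTP_DEFAULT_MINWINDOW). */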
174 
175 	asoc->a_rwnd = asoc->rwnd;
176 
177 	/* Use my own max window until I learn something better.  */
178 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
179 
180 	/* Initialize the receive memory counter */
181 	atomic_set(&asoc->rmem_alloc, 0);
182 
183 	init_waitqueue_head(&asoc->wait);
184 
185 	asoc->c.my_vtag = sctp_generate_tag(ep);
186 	asoc->c.my_port = ep->base.bind_addr.port;
187 
188 	asoc->c.initial_tsn = sctp_generate_tsn(ep);
189 
190 	asoc->next_tsn = asoc->c.initial_tsn;
191 
192 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
193 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
194 	asoc->highest_sacked = asoc->ctsn_ack_point;
195 	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
196 
197 	/* ADDIP Section 4.1 Asconf Chunk Procedures
198 	 *
199 	 * When an endpoint has an ASCONF signaled change to be sent to the
200 	 * remote endpoint it should do the following:
201 	 * ...
202 	 * A2) a serial number should be assigned to the chunk. The serial
203 	 * number SHOULD be a monotonically increasing number. The serial
204 	 * numbers SHOULD be initialized at the start of the
205 	 * association to the same value as the initial TSN.
206 	 */
207 	asoc->addip_serial = asoc->c.initial_tsn;
208 	asoc->strreset_outseq = asoc->c.initial_tsn;
209 
210 	INIT_LIST_HEAD(&asoc->addip_chunk_list);
211 	INIT_LIST_HEAD(&asoc->asconf_ack_list);
212 
213 	/* Make an empty list of remote transport addresses.  */
214 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
215 
216 	/* RFC 2960 5.1 Normal Establishment of an Association
217 	 *
218 	 * After the reception of the first data chunk in an
219 	 * association the endpoint must immediately respond with a
220 	 * sack to acknowledge the data chunk.  Subsequent
221 	 * acknowledgements should be done as described in Section
222 	 * 6.2.
223 	 *
224 	 * [We implement this by telling a new association that it
225 	 * already received one packet.]
226 	 */
227 	asoc->peer.sack_needed = 1;
228 	asoc->peer.sack_generation = 1;
229 
230 	/* Assume that the peer will tell us if it recognizes ASCONF
231 	 * as part of the INIT exchange.
232 	 * The sctp_addip_noauth option is there for backward compatibility
233 	 * and restores the old behavior.
234 	 */
235 	if (net->sctp.addip_noauth)
236 		asoc->peer.asconf_capable = 1;
237 
238 	/* Create an input queue.  */
239 	sctp_inq_init(&asoc->base.inqueue);
240 	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
241 
242 	/* Create an output queue.  */
243 	sctp_outq_init(asoc, &asoc->outqueue);
244 
245 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
246 		goto fail_init;
247 
248 	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
249 			     0, gfp))
250 		goto fail_init;
251 
252 	/* Initialize default path MTU. */
253 	asoc->pathmtu = sp->pathmtu;
254 	sctp_assoc_update_frag_point(asoc);
255 
256 	/* Assume that peer would support both address types unless we are
257 	 * told otherwise.
258 	 */
259 	asoc->peer.ipv4_address = 1;
260 	if (asoc->base.sk->sk_family == PF_INET6)
261 		asoc->peer.ipv6_address = 1;
262 	INIT_LIST_HEAD(&asoc->asocs);
263 
264 	asoc->default_stream = sp->default_stream;
265 	asoc->default_ppid = sp->default_ppid;
266 	asoc->default_flags = sp->default_flags;
267 	asoc->default_context = sp->default_context;
268 	asoc->default_timetolive = sp->default_timetolive;
269 	asoc->default_rcv_context = sp->default_rcv_context;
270 
271 	/* AUTH related initializations */
272 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
273 	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
274 		goto stream_free;
275 
276 	asoc->active_key_id = ep->active_key_id;
277 	asoc->prsctp_enable = ep->prsctp_enable;
278 	asoc->reconf_enable = ep->reconf_enable;
279 	asoc->strreset_enable = ep->strreset_enable;
280 
281 	/* Save the hmacs and chunks list into this association */
282 	if (ep->auth_hmacs_list)
283 		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
284 			ntohs(ep->auth_hmacs_list->param_hdr.length));
285 	if (ep->auth_chunk_list)
286 		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
287 			ntohs(ep->auth_chunk_list->param_hdr.length));
288 
289 	/* Get the AUTH random number for this association */
290 	p = (struct sctp_paramhdr *)asoc->c.auth_random;
291 	p->type = SCTP_PARAM_RANDOM;
292 	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
293 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
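	/* The result is a struct sctp_paramhdr immediately followed by
	 * SCTP_AUTH_RANDOM_LENGTH bytes of random data, matching the
	 * RANDOM parameter layout of RFC 4895.
	 */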
294 
295 	return asoc;
296 
297 stream_free:
298 	sctp_stream_free(&asoc->stream);
299 fail_init:
300 	sock_put(asoc->base.sk);
301 	sctp_endpoint_put(asoc->ep);
302 	return NULL;
303 }
304 
305 /* Allocate and initialize a new association */
306 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
307 					      const struct sock *sk,
308 					      enum sctp_scope scope, gfp_t gfp)
309 {
310 	struct sctp_association *asoc;
311 
312 	asoc = kzalloc(sizeof(*asoc), gfp);
313 	if (!asoc)
314 		goto fail;
315 
316 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
317 		goto fail_init;
318 
319 	SCTP_DBG_OBJCNT_INC(assoc);
320 
321 	pr_debug("Created asoc %p\n", asoc);
322 
323 	return asoc;
324 
325 fail_init:
326 	kfree(asoc);
327 fail:
328 	return NULL;
329 }
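
/* A minimal, hypothetical usage sketch (real call sites, e.g. the
 * sendmsg path, also hold the socket lock and link the association
 * into the endpoint):
 *
 *	struct sctp_association *asoc;
 *
 *	asoc = sctp_association_new(ep, sk, SCTP_SCOPE_GLOBAL, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *	if (!sctp_assoc_add_peer(asoc, &peer_addr, GFP_KERNEL, SCTP_UNKNOWN))
 *		goto free;
 *	...
 * free:
 *	sctp_association_free(asoc);
 */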
330 
331 /* Free this association if possible.  There may still be users, so
332  * the actual deallocation may be delayed.
333  */
334 void sctp_association_free(struct sctp_association *asoc)
335 {
336 	struct sock *sk = asoc->base.sk;
337 	struct sctp_transport *transport;
338 	struct list_head *pos, *temp;
339 	int i;
340 
341 	/* Only real associations count against the endpoint, so
342 	 * don't bother for if this is a temporary association.
343 	 * don't bother if this is a temporary association.
344 	if (!list_empty(&asoc->asocs)) {
345 		list_del(&asoc->asocs);
346 
347 		/* Decrement the backlog value for a TCP-style listening
348 		 * socket.
349 		 */
350 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
351 			sk->sk_ack_backlog--;
352 	}
353 
354 	/* Mark as dead, so other users can know this structure is
355 	 * going away.
356 	 */
357 	asoc->base.dead = true;
358 
359 	/* Dispose of any data lying around in the outqueue. */
360 	sctp_outq_free(&asoc->outqueue);
361 
362 	/* Dispose of any pending messages for the upper layer. */
363 	sctp_ulpq_free(&asoc->ulpq);
364 
365 	/* Dispose of any pending chunks on the inqueue. */
366 	sctp_inq_free(&asoc->base.inqueue);
367 
368 	sctp_tsnmap_free(&asoc->peer.tsn_map);
369 
370 	/* Free stream information. */
371 	sctp_stream_free(&asoc->stream);
372 
373 	if (asoc->strreset_chunk)
374 		sctp_chunk_free(asoc->strreset_chunk);
375 
376 	/* Clean up the bound address list. */
377 	sctp_bind_addr_free(&asoc->base.bind_addr);
378 
379 	/* Do we need to go through all of our timers and
380 	 * delete them?   To be safe we will try to delete all, but we
381 	 * should be able to go through and make a guess based
382 	 * on our state.
383 	 */
384 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
385 		if (del_timer(&asoc->timers[i]))
386 			sctp_association_put(asoc);
387 	}
388 
389 	/* Free peer's cached cookie. */
390 	kfree(asoc->peer.cookie);
391 	kfree(asoc->peer.peer_random);
392 	kfree(asoc->peer.peer_chunks);
393 	kfree(asoc->peer.peer_hmacs);
394 
395 	/* Release the transport structures. */
396 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
397 		transport = list_entry(pos, struct sctp_transport, transports);
398 		list_del_rcu(pos);
399 		sctp_unhash_transport(transport);
400 		sctp_transport_free(transport);
401 	}
402 
403 	asoc->peer.transport_count = 0;
404 
405 	sctp_asconf_queue_teardown(asoc);
406 
407 	/* Free pending address space being deleted */
408 	kfree(asoc->asconf_addr_del_pending);
409 
410 	/* AUTH - Free the endpoint shared keys */
411 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
412 
413 	/* AUTH - Free the association shared key */
414 	sctp_auth_key_put(asoc->asoc_shared_key);
415 
416 	sctp_association_put(asoc);
417 }
418 
419 /* Cleanup and free up an association. */
420 static void sctp_association_destroy(struct sctp_association *asoc)
421 {
422 	if (unlikely(!asoc->base.dead)) {
423 		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
424 		return;
425 	}
426 
427 	sctp_endpoint_put(asoc->ep);
428 	sock_put(asoc->base.sk);
429 
430 	if (asoc->assoc_id != 0) {
431 		spin_lock_bh(&sctp_assocs_id_lock);
432 		idr_remove(&sctp_assocs_id, asoc->assoc_id);
433 		spin_unlock_bh(&sctp_assocs_id_lock);
434 	}
435 
436 	WARN_ON(atomic_read(&asoc->rmem_alloc));
437 
438 	kfree_rcu(asoc, rcu);
439 	SCTP_DBG_OBJCNT_DEC(assoc);
440 }
441 
442 /* Change the primary destination address for the peer. */
443 void sctp_assoc_set_primary(struct sctp_association *asoc,
444 			    struct sctp_transport *transport)
445 {
446 	int changeover = 0;
447 
448 	/* it's a changeover only if we already have a primary path
449 	 * that we are changing
450 	 */
451 	if (asoc->peer.primary_path != NULL &&
452 	    asoc->peer.primary_path != transport)
453 		changeover = 1;
454 
455 	asoc->peer.primary_path = transport;
456 
457 	/* Set a default msg_name for events. */
458 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
459 	       sizeof(union sctp_addr));
460 
461 	/* If the primary path is changing, assume that the
462 	 * user wants to use this new path.
463 	 */
464 	if ((transport->state == SCTP_ACTIVE) ||
465 	    (transport->state == SCTP_UNKNOWN))
466 		asoc->peer.active_path = transport;
467 
468 	/*
469 	 * SFR-CACC algorithm:
470 	 * Upon the receipt of a request to change the primary
471 	 * destination address, on the data structure for the new
472 	 * primary destination, the sender MUST do the following:
473 	 *
474 	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
475 	 * to this destination address earlier. The sender MUST set
476 	 * CYCLING_CHANGEOVER to indicate that this switch is a
477 	 * double switch to the same destination address.
478 	 *
479 	 * Really, only bother if we have data queued or outstanding on
480 	 * the association.
481 	 */
482 	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
483 		return;
484 
485 	if (transport->cacc.changeover_active)
486 		transport->cacc.cycling_changeover = changeover;
487 
488 	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
489 	 * a changeover has occurred.
490 	 */
491 	transport->cacc.changeover_active = changeover;
492 
493 	/* 3) The sender MUST store the next TSN to be sent in
494 	 * next_tsn_at_change.
495 	 */
496 	transport->cacc.next_tsn_at_change = asoc->next_tsn;
497 }
498 
499 /* Remove a transport from an association.  */
500 void sctp_assoc_rm_peer(struct sctp_association *asoc,
501 			struct sctp_transport *peer)
502 {
503 	struct sctp_transport *transport;
504 	struct list_head *pos;
505 	struct sctp_chunk *ch;
506 
507 	pr_debug("%s: association:%p addr:%pISpc\n",
508 		 __func__, asoc, &peer->ipaddr.sa);
509 
510 	/* If we are to remove the current retran_path, update it
511 	 * to the next peer before removing this peer from the list.
512 	 */
513 	if (asoc->peer.retran_path == peer)
514 		sctp_assoc_update_retran_path(asoc);
515 
516 	/* Remove this peer from the list. */
517 	list_del_rcu(&peer->transports);
518 	/* Remove this peer from the transport hashtable */
519 	sctp_unhash_transport(peer);
520 
521 	/* Get the first transport of asoc. */
522 	pos = asoc->peer.transport_addr_list.next;
523 	transport = list_entry(pos, struct sctp_transport, transports);
524 
525 	/* Update any entries that match the peer to be deleted. */
526 	if (asoc->peer.primary_path == peer)
527 		sctp_assoc_set_primary(asoc, transport);
528 	if (asoc->peer.active_path == peer)
529 		asoc->peer.active_path = transport;
530 	if (asoc->peer.retran_path == peer)
531 		asoc->peer.retran_path = transport;
532 	if (asoc->peer.last_data_from == peer)
533 		asoc->peer.last_data_from = transport;
534 
535 	if (asoc->strreset_chunk &&
536 	    asoc->strreset_chunk->transport == peer) {
537 		asoc->strreset_chunk->transport = transport;
538 		sctp_transport_reset_reconf_timer(transport);
539 	}
540 
541 	/* If we remove the transport an INIT was last sent to, set it to
542 	 * NULL. Combined with the update of the retran path above, this
543 	 * will cause the next INIT to be sent to the next available
544 	 * transport, maintaining the cycle.
545 	 */
546 	if (asoc->init_last_sent_to == peer)
547 		asoc->init_last_sent_to = NULL;
548 
549 	/* If we remove the transport an SHUTDOWN was last sent to, set it
550 	 * to NULL. Combined with the update of the retran path above, this
551 	 * will cause the next SHUTDOWN to be sent to the next available
552 	 * transport, maintaining the cycle.
553 	 */
554 	if (asoc->shutdown_last_sent_to == peer)
555 		asoc->shutdown_last_sent_to = NULL;
556 
557 	/* If we remove the transport an ASCONF was last sent to, set it to
558 	 * NULL.
559 	 */
560 	if (asoc->addip_last_asconf &&
561 	    asoc->addip_last_asconf->transport == peer)
562 		asoc->addip_last_asconf->transport = NULL;
563 
564 	/* If we have something on the transmitted list, we have to
565 	 * save it off.  The best place is the active path.
566 	 */
567 	if (!list_empty(&peer->transmitted)) {
568 		struct sctp_transport *active = asoc->peer.active_path;
569 
570 		/* Reset the transport of each chunk on this list */
571 		list_for_each_entry(ch, &peer->transmitted,
572 					transmitted_list) {
573 			ch->transport = NULL;
574 			ch->rtt_in_progress = 0;
575 		}
576 
577 		list_splice_tail_init(&peer->transmitted,
578 					&active->transmitted);
579 
580 		/* Start a T3 timer here in case it wasn't running so
581 		 * that these migrated packets have a chance to get
582 		 * retransmitted.
583 		 */
584 		if (!timer_pending(&active->T3_rtx_timer))
585 			if (!mod_timer(&active->T3_rtx_timer,
586 					jiffies + active->rto))
587 				sctp_transport_hold(active);
588 	}
589 
590 	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
591 		if (ch->transport == peer)
592 			ch->transport = NULL;
593 
594 	asoc->peer.transport_count--;
595 
596 	sctp_transport_free(peer);
597 }
598 
599 /* Add a transport address to an association.  */
600 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
601 					   const union sctp_addr *addr,
602 					   const gfp_t gfp,
603 					   const int peer_state)
604 {
605 	struct net *net = sock_net(asoc->base.sk);
606 	struct sctp_transport *peer;
607 	struct sctp_sock *sp;
608 	unsigned short port;
609 
610 	sp = sctp_sk(asoc->base.sk);
611 
612 	/* AF_INET and AF_INET6 share common port field. */
613 	port = ntohs(addr->v4.sin_port);
614 
615 	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
616 		 asoc, &addr->sa, peer_state);
617 
618 	/* Set the port if it has not been set yet.  */
619 	if (0 == asoc->peer.port)
620 		asoc->peer.port = port;
621 
622 	/* Check to see if this is a duplicate. */
623 	peer = sctp_assoc_lookup_paddr(asoc, addr);
624 	if (peer) {
625 		/* An UNKNOWN state is only set on transports added by
626 		 * user in sctp_connectx() call.  Such transports should be
627 		 * considered CONFIRMED per RFC 4960, Section 5.4.
628 		 */
629 		if (peer->state == SCTP_UNKNOWN)
630 			peer->state = SCTP_ACTIVE;
632 		return peer;
633 	}
634 
635 	peer = sctp_transport_new(net, addr, gfp);
636 	if (!peer)
637 		return NULL;
638 
639 	sctp_transport_set_owner(peer, asoc);
640 
641 	/* Initialize the peer's heartbeat interval based on the
642 	 * association configured value.
643 	 */
644 	peer->hbinterval = asoc->hbinterval;
645 
646 	/* Set the path max_retrans.  */
647 	peer->pathmaxrxt = asoc->pathmaxrxt;
648 
649 	/* And the partial failure retrans threshold */
650 	peer->pf_retrans = asoc->pf_retrans;
651 
652 	/* Initialize the peer's SACK delay timeout based on the
653 	 * association configured value.
654 	 */
655 	peer->sackdelay = asoc->sackdelay;
656 	peer->sackfreq = asoc->sackfreq;
657 
658 	if (addr->sa.sa_family == AF_INET6) {
659 		__be32 info = addr->v6.sin6_flowinfo;
660 
661 		if (info) {
662 			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
663 			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
664 		} else {
665 			peer->flowlabel = asoc->flowlabel;
666 		}
667 	}
668 	peer->dscp = asoc->dscp;
669 
670 	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
671 	 * based on association setting.
672 	 */
673 	peer->param_flags = asoc->param_flags;
674 
675 	/* Initialize the pmtu of the transport. */
676 	sctp_transport_route(peer, NULL, sp);
677 
678 	/* If this is the first transport addr on this association,
679 	 * initialize the association PMTU to the peer's PMTU.
680 	 * If not and the current association PMTU is higher than the new
681 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
682 	 */
683 	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
684 				  min_t(int, peer->pathmtu, asoc->pathmtu) :
685 				  peer->pathmtu);
686 
687 	peer->pmtu_pending = 0;
688 
689 	/* The asoc->peer.port might not be meaningful yet, but
690 	 * initialize the packet structure anyway.
691 	 */
692 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
693 			 asoc->peer.port);
694 
695 	/* 7.2.1 Slow-Start
696 	 *
697 	 * o The initial cwnd before DATA transmission or after a sufficiently
698 	 *   long idle period MUST be set to
699 	 *      min(4*MTU, max(2*MTU, 4380 bytes))
700 	 *
701 	 * o The initial value of ssthresh MAY be arbitrarily high
702 	 *   (for example, implementations MAY use the size of the
703 	 *   receiver advertised window).
704 	 */
705 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
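	/* For example, with a 1500-byte path MTU this yields
	 * min(6000, max(3000, 4380)) = 4380 bytes.
	 */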
706 
707 	/* At this point, we may not have the receiver's advertised window,
708 	 * so initialize ssthresh to the default value and it will be set
709 	 * later when we process the INIT.
710 	 */
711 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
712 
713 	peer->partial_bytes_acked = 0;
714 	peer->flight_size = 0;
715 	peer->burst_limited = 0;
716 
717 	/* Set the transport's RTO.initial value */
718 	peer->rto = asoc->rto_initial;
719 	sctp_max_rto(asoc, peer);
720 
721 	/* Set the peer's active state. */
722 	peer->state = peer_state;
723 
724 	/* Add this peer into the transport hashtable */
725 	if (sctp_hash_transport(peer)) {
726 		sctp_transport_free(peer);
727 		return NULL;
728 	}
729 
730 	/* Attach the remote transport to our asoc.  */
731 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
732 	asoc->peer.transport_count++;
733 
734 	/* If we do not yet have a primary path, set one.  */
735 	if (!asoc->peer.primary_path) {
736 		sctp_assoc_set_primary(asoc, peer);
737 		asoc->peer.retran_path = peer;
738 	}
739 
740 	if (asoc->peer.active_path == asoc->peer.retran_path &&
741 	    peer->state != SCTP_UNCONFIRMED) {
742 		asoc->peer.retran_path = peer;
743 	}
744 
745 	return peer;
746 }
747 
748 /* Delete a transport address from an association.  */
749 void sctp_assoc_del_peer(struct sctp_association *asoc,
750 			 const union sctp_addr *addr)
751 {
752 	struct list_head	*pos;
753 	struct list_head	*temp;
754 	struct sctp_transport	*transport;
755 
756 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
757 		transport = list_entry(pos, struct sctp_transport, transports);
758 		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
759 			/* Do the bookkeeping for removing the peer and free it. */
760 			sctp_assoc_rm_peer(asoc, transport);
761 			break;
762 		}
763 	}
764 }
765 
766 /* Lookup a transport by address. */
767 struct sctp_transport *sctp_assoc_lookup_paddr(
768 					const struct sctp_association *asoc,
769 					const union sctp_addr *address)
770 {
771 	struct sctp_transport *t;
772 
773 	/* Cycle through all transports searching for a peer address. */
774 
775 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
776 			transports) {
777 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
778 			return t;
779 	}
780 
781 	return NULL;
782 }
783 
784 /* Remove all transports except a given one */
785 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
786 				     struct sctp_transport *primary)
787 {
788 	struct sctp_transport	*temp;
789 	struct sctp_transport	*t;
790 
791 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
792 				 transports) {
793 		/* if the current transport is not the primary one, delete it */
794 		if (t != primary)
795 			sctp_assoc_rm_peer(asoc, t);
796 	}
797 }
798 
799 /* Engage in transport control operations.
800  * Mark the transport up or down and send a notification to the user.
801  * Select and update the new active and retran paths.
802  */
803 void sctp_assoc_control_transport(struct sctp_association *asoc,
804 				  struct sctp_transport *transport,
805 				  enum sctp_transport_cmd command,
806 				  sctp_sn_error_t error)
807 {
808 	struct sctp_ulpevent *event;
809 	struct sockaddr_storage addr;
810 	int spc_state = 0;
811 	bool ulp_notify = true;
812 
813 	/* Record the transition on the transport.  */
814 	switch (command) {
815 	case SCTP_TRANSPORT_UP:
816 		/* If we are moving from UNCONFIRMED state due
817 		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
818 		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
819 		 */
820 		if (SCTP_UNCONFIRMED == transport->state &&
821 		    SCTP_HEARTBEAT_SUCCESS == error)
822 			spc_state = SCTP_ADDR_CONFIRMED;
823 		else
824 			spc_state = SCTP_ADDR_AVAILABLE;
825 		/* Don't inform ULP about transition from PF to
826 		 * active state and set cwnd to 1 MTU, see SCTP
827 		 * Quick failover draft section 5.1, point 5
828 		 */
829 		if (transport->state == SCTP_PF) {
830 			ulp_notify = false;
831 			transport->cwnd = asoc->pathmtu;
832 		}
833 		transport->state = SCTP_ACTIVE;
834 		break;
835 
836 	case SCTP_TRANSPORT_DOWN:
837 		/* If the transport was never confirmed, do not transition it
838 		 * to inactive state.  Also, release the cached route since
839 		 * there may be a better route next time.
840 		 */
841 		if (transport->state != SCTP_UNCONFIRMED) {
842 			transport->state = SCTP_INACTIVE;
843 		} else {
844 			sctp_transport_dst_release(transport);
845 			ulp_notify = false;
846 		}
847 
848 		spc_state = SCTP_ADDR_UNREACHABLE;
849 		break;
850 
851 	case SCTP_TRANSPORT_PF:
852 		transport->state = SCTP_PF;
853 		ulp_notify = false;
854 		break;
855 
856 	default:
857 		return;
858 	}
859 
860 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
861 	 * to the user.
862 	 */
863 	if (ulp_notify) {
864 		memset(&addr, 0, sizeof(struct sockaddr_storage));
865 		memcpy(&addr, &transport->ipaddr,
866 		       transport->af_specific->sockaddr_len);
867 
868 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
869 					0, spc_state, error, GFP_ATOMIC);
870 		if (event)
871 			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
872 	}
873 
874 	/* Select new active and retran paths. */
875 	sctp_select_active_and_retran_path(asoc);
876 }
877 
878 /* Hold a reference to an association. */
879 void sctp_association_hold(struct sctp_association *asoc)
880 {
881 	refcount_inc(&asoc->base.refcnt);
882 }
883 
884 /* Release a reference to an association and cleanup
885  * if there are no more references.
886  */
887 void sctp_association_put(struct sctp_association *asoc)
888 {
889 	if (refcount_dec_and_test(&asoc->base.refcnt))
890 		sctp_association_destroy(asoc);
891 }
892 
893 /* Allocate the next TSN, Transmission Sequence Number, for the given
894  * association.
895  */
896 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
897 {
898 	/* From RFC 4960, Section 1.6, Serial Number Arithmetic:
899 	 * Transmission Sequence Numbers wrap around when they reach
900 	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
901 	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
902 	 */
903 	__u32 retval = asoc->next_tsn;
904 	asoc->next_tsn++;
905 	asoc->unack_data++;
906 
907 	return retval;
908 }
909 
910 /* Compare two addresses to see if they match.  Wildcard addresses
911  * only match themselves.
912  */
913 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
914 			const union sctp_addr *ss2)
915 {
916 	struct sctp_af *af;
917 
918 	af = sctp_get_af_specific(ss1->sa.sa_family);
919 	if (unlikely(!af))
920 		return 0;
921 
922 	return af->cmp_addr(ss1, ss2);
923 }
924 
925 /* Return an ecne chunk to get prepended to a packet.
926  * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
927  * No we don't, but we could/should.
928  */
929 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
930 {
931 	if (!asoc->need_ecne)
932 		return NULL;
933 
934 	/* Send ECNE if needed.
935 	 * Not being able to allocate a chunk here is not deadly.
936 	 */
937 	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
938 }
939 
940 /*
941  * Find which transport this TSN was sent on.
942  */
943 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
944 					     __u32 tsn)
945 {
946 	struct sctp_transport *active;
947 	struct sctp_transport *match;
948 	struct sctp_transport *transport;
949 	struct sctp_chunk *chunk;
950 	__be32 key = htonl(tsn);
951 
952 	match = NULL;
953 
954 	/*
955 	 * FIXME: In general, find a more efficient data structure for
956 	 * searching.
957 	 */
958 
959 	/*
960 	 * The general strategy is to search each transport's transmitted
961 	 * list.   Return which transport this TSN lives on.
962 	 *
963 	 * Let's be hopeful and check the active_path first.
964 	 * Another optimization would be to know if there is only one
965 	 * outbound path and not have to look for the TSN at all.
966 	 *
967 	 */
968 
969 	active = asoc->peer.active_path;
970 
971 	list_for_each_entry(chunk, &active->transmitted,
972 			transmitted_list) {
973 
974 		if (key == chunk->subh.data_hdr->tsn) {
975 			match = active;
976 			goto out;
977 		}
978 	}
979 
980 	/* If not found, go search all the other transports. */
981 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
982 			transports) {
983 
984 		if (transport == active)
985 			continue;
986 		list_for_each_entry(chunk, &transport->transmitted,
987 				transmitted_list) {
988 			if (key == chunk->subh.data_hdr->tsn) {
989 				match = transport;
990 				goto out;
991 			}
992 		}
993 	}
994 out:
995 	return match;
996 }
997 
998 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
999 static void sctp_assoc_bh_rcv(struct work_struct *work)
1000 {
1001 	struct sctp_association *asoc =
1002 		container_of(work, struct sctp_association,
1003 			     base.inqueue.immediate);
1004 	struct net *net = sock_net(asoc->base.sk);
1005 	union sctp_subtype subtype;
1006 	struct sctp_endpoint *ep;
1007 	struct sctp_chunk *chunk;
1008 	struct sctp_inq *inqueue;
1009 	int first_time = 1;	/* is this the first time through the loop */
1010 	int error = 0;
1011 	int state;
1012 
1013 	/* The association should be held so we should be safe. */
1014 	ep = asoc->ep;
1015 
1016 	inqueue = &asoc->base.inqueue;
1017 	sctp_association_hold(asoc);
1018 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1019 		state = asoc->state;
1020 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1021 
1022 		/* If the first chunk in the packet is AUTH, do special
1023 		 * processing specified in Section 6.3 of SCTP-AUTH spec
1024 		 */
1025 		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1026 			struct sctp_chunkhdr *next_hdr;
1027 
1028 			next_hdr = sctp_inq_peek(inqueue);
1029 			if (!next_hdr)
1030 				goto normal;
1031 
1032 			/* If the next chunk is COOKIE-ECHO, skip the AUTH
1033 			 * chunk while saving a pointer to it so we can do
1034 			 * Authentication later (during cookie-echo
1035 			 * processing).
1036 			 */
1037 			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1038 				chunk->auth_chunk = skb_clone(chunk->skb,
1039 							      GFP_ATOMIC);
1040 				chunk->auth = 1;
1041 				continue;
1042 			}
1043 		}
1044 
1045 normal:
1046 		/* SCTP-AUTH, Section 6.3:
1047 		 *    The receiver has a list of chunk types which it expects
1048 		 *    to be received only after an AUTH-chunk.  This list has
1049 		 *    been sent to the peer during the association setup.  It
1050 		 *    MUST silently discard these chunks if they are not placed
1051 		 *    after an AUTH chunk in the packet.
1052 		 */
1053 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1054 			continue;
1055 
1056 		/* Remember where the last DATA chunk came from so we
1057 		 * know where to send the SACK.
1058 		 */
1059 		if (sctp_chunk_is_data(chunk))
1060 			asoc->peer.last_data_from = chunk->transport;
1061 		else {
1062 			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1063 			asoc->stats.ictrlchunks++;
1064 			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1065 				asoc->stats.isacks++;
1066 		}
1067 
1068 		if (chunk->transport)
1069 			chunk->transport->last_time_heard = ktime_get();
1070 
1071 		/* Run through the state machine. */
1072 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1073 				   state, ep, asoc, chunk, GFP_ATOMIC);
1074 
1075 		/* Check to see if the association is freed in response to
1076 		 * the incoming chunk.  If so, get out of the while loop.
1077 		 */
1078 		if (asoc->base.dead)
1079 			break;
1080 
1081 		/* If there is an error on chunk, discard this packet. */
1082 		if (error && chunk)
1083 			chunk->pdiscard = 1;
1084 
1085 		if (first_time)
1086 			first_time = 0;
1087 	}
1088 	sctp_association_put(asoc);
1089 }
1090 
1091 /* This routine moves an association from its old sk to a new sk.  */
1092 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1093 {
1094 	struct sctp_sock *newsp = sctp_sk(newsk);
1095 	struct sock *oldsk = assoc->base.sk;
1096 
1097 	/* Delete the association from the old endpoint's list of
1098 	 * associations.
1099 	 */
1100 	list_del_init(&assoc->asocs);
1101 
1102 	/* Decrement the backlog value for a TCP-style socket. */
1103 	if (sctp_style(oldsk, TCP))
1104 		oldsk->sk_ack_backlog--;
1105 
1106 	/* Release references to the old endpoint and the sock.  */
1107 	sctp_endpoint_put(assoc->ep);
1108 	sock_put(assoc->base.sk);
1109 
1110 	/* Get a reference to the new endpoint.  */
1111 	assoc->ep = newsp->ep;
1112 	sctp_endpoint_hold(assoc->ep);
1113 
1114 	/* Get a reference to the new sock.  */
1115 	assoc->base.sk = newsk;
1116 	sock_hold(assoc->base.sk);
1117 
1118 	/* Add the association to the new endpoint's list of associations.  */
1119 	sctp_endpoint_add_asoc(newsp->ep, assoc);
1120 }
1121 
1122 /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1123 int sctp_assoc_update(struct sctp_association *asoc,
1124 		      struct sctp_association *new)
1125 {
1126 	struct sctp_transport *trans;
1127 	struct list_head *pos, *temp;
1128 
1129 	/* Copy in new parameters of peer. */
1130 	asoc->c = new->c;
1131 	asoc->peer.rwnd = new->peer.rwnd;
1132 	asoc->peer.sack_needed = new->peer.sack_needed;
1133 	asoc->peer.auth_capable = new->peer.auth_capable;
1134 	asoc->peer.i = new->peer.i;
1135 
1136 	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1137 			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
1138 		return -ENOMEM;
1139 
1140 	/* Remove any peer addresses not present in the new association. */
1141 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1142 		trans = list_entry(pos, struct sctp_transport, transports);
1143 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1144 			sctp_assoc_rm_peer(asoc, trans);
1145 			continue;
1146 		}
1147 
1148 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1149 			sctp_transport_reset(trans);
1150 	}
1151 
1152 	/* If this is case A (association restart, per RFC 4960
1153 	 * Section 5.2.4), use initial_tsn as next_tsn. If this is
1154 	 * case B, use the current next_tsn in case data sent to the
1155 	 * peer has been discarded and needs retransmission.
1156 	 */
1157 	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1158 		asoc->next_tsn = new->next_tsn;
1159 		asoc->ctsn_ack_point = new->ctsn_ack_point;
1160 		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1161 
1162 		/* Reinitialize SSN for both local streams
1163 		 * and peer's streams.
1164 		 */
1165 		sctp_stream_clear(&asoc->stream);
1166 
1167 		/* Flush the ULP reassembly and ordered queue.
1168 		 * Any data there will now be stale and will
1169 		 * cause problems.
1170 		 */
1171 		sctp_ulpq_flush(&asoc->ulpq);
1172 
1173 		/* reset the overall association error count so
1174 		 * that the restarted association doesn't get torn
1175 		 * down on the next retransmission timer.
1176 		 */
1177 		asoc->overall_error_count = 0;
1178 
1179 	} else {
1180 		/* Add any peer addresses from the new association. */
1181 		list_for_each_entry(trans, &new->peer.transport_addr_list,
1182 				    transports)
1183 			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
1184 			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
1185 						 GFP_ATOMIC, trans->state))
1186 				return -ENOMEM;
1187 
1188 		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1189 		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1190 
1191 		if (sctp_state(asoc, COOKIE_WAIT))
1192 			sctp_stream_update(&asoc->stream, &new->stream);
1193 
1194 		/* get a new assoc id if we don't have one yet. */
1195 		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
1196 			return -ENOMEM;
1197 	}
1198 
1199 	/* SCTP-AUTH: Save the peer parameters from the new associations
1200 	 * and also move the association shared keys over
1201 	 */
1202 	kfree(asoc->peer.peer_random);
1203 	asoc->peer.peer_random = new->peer.peer_random;
1204 	new->peer.peer_random = NULL;
1205 
1206 	kfree(asoc->peer.peer_chunks);
1207 	asoc->peer.peer_chunks = new->peer.peer_chunks;
1208 	new->peer.peer_chunks = NULL;
1209 
1210 	kfree(asoc->peer.peer_hmacs);
1211 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1212 	new->peer.peer_hmacs = NULL;
1213 
1214 	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1215 }
1216 
1217 /* Update the retran path for sending a retransmitted packet.
1218  * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1219  *
1220  *   When there is outbound data to send and the primary path
1221  *   becomes inactive (e.g., due to failures), or where the
1222  *   SCTP user explicitly requests to send data to an
1223  *   inactive destination transport address, before reporting
1224  *   an error to its ULP, the SCTP endpoint should try to send
1225  *   the data to an alternate active destination transport
1226  *   address if one exists.
1227  *
1228  *   When retransmitting data that timed out, if the endpoint
1229  *   is multihomed, it should consider each source-destination
1230  *   address pair in its retransmission selection policy.
1231  *   When retransmitting timed-out data, the endpoint should
1232  *   attempt to pick the most divergent source-destination
1233  *   pair from the original source-destination pair to which
1234  *   the packet was transmitted.
1235  *
1236  *   Note: Rules for picking the most divergent source-destination
1237  *   pair are an implementation decision and are not specified
1238  *   within this document.
1239  *
1240  * Our basic strategy is to round-robin transports by priority
1241  * according to sctp_trans_score(): e.g., if no transport with
1242  * state SCTP_ACTIVE exists, round-robin through SCTP_UNKNOWN,
1243  * etc. You get the picture.
1244  */
1245 static u8 sctp_trans_score(const struct sctp_transport *trans)
1246 {
1247 	switch (trans->state) {
1248 	case SCTP_ACTIVE:
1249 		return 3;	/* best case */
1250 	case SCTP_UNKNOWN:
1251 		return 2;
1252 	case SCTP_PF:
1253 		return 1;
1254 	default: /* case SCTP_INACTIVE */
1255 		return 0;	/* worst case */
1256 	}
1257 }
1258 
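/* Break a tie between two equally scored transports: prefer the one with
 * the lower error count and, failing that, the one heard from most
 * recently.
 */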
1259 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1260 						   struct sctp_transport *trans2)
1261 {
1262 	if (trans1->error_count > trans2->error_count) {
1263 		return trans2;
1264 	} else if (trans1->error_count == trans2->error_count &&
1265 		   ktime_after(trans2->last_time_heard,
1266 			       trans1->last_time_heard)) {
1267 		return trans2;
1268 	} else {
1269 		return trans1;
1270 	}
1271 }
1272 
1273 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1274 						    struct sctp_transport *best)
1275 {
1276 	u8 score_curr, score_best;
1277 
1278 	if (best == NULL || curr == best)
1279 		return curr;
1280 
1281 	score_curr = sctp_trans_score(curr);
1282 	score_best = sctp_trans_score(best);
1283 
1284 	/* First, try a score-based selection if the two transport states
1285 	 * differ. If we're in a tie, let's try to make a more clever
1286 	 * decision here based on error counts and last time heard.
1287 	 */
1288 	if (score_curr > score_best)
1289 		return curr;
1290 	else if (score_curr == score_best)
1291 		return sctp_trans_elect_tie(best, curr);
1292 	else
1293 		return best;
1294 }
1295 
1296 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1297 {
1298 	struct sctp_transport *trans = asoc->peer.retran_path;
1299 	struct sctp_transport *trans_next = NULL;
1300 
1301 	/* We're done if this is the one and only path. */
1302 	if (asoc->peer.transport_count == 1)
1303 		return;
1304 	/* If active_path and retran_path are the same and active,
1305 	 * then this is the only active path. Use it.
1306 	 */
1307 	if (asoc->peer.active_path == asoc->peer.retran_path &&
1308 	    asoc->peer.active_path->state == SCTP_ACTIVE)
1309 		return;
1310 
1311 	/* Iterate from retran_path's successor back to retran_path. */
1312 	for (trans = list_next_entry(trans, transports); 1;
1313 	     trans = list_next_entry(trans, transports)) {
1314 		/* Manually skip the head element. */
1315 		if (&trans->transports == &asoc->peer.transport_addr_list)
1316 			continue;
1317 		if (trans->state == SCTP_UNCONFIRMED)
1318 			continue;
1319 		trans_next = sctp_trans_elect_best(trans, trans_next);
1320 		/* Active is good enough for immediate return. */
1321 		if (trans_next->state == SCTP_ACTIVE)
1322 			break;
1323 		/* We've reached the end, time to update path. */
1324 		if (trans == asoc->peer.retran_path)
1325 			break;
1326 	}
1327 
1328 	asoc->peer.retran_path = trans_next;
1329 
1330 	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1331 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1332 }
1333 
1334 static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1335 {
1336 	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1337 	struct sctp_transport *trans_pf = NULL;
1338 
1339 	/* Look for the two most recently used active transports. */
1340 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1341 			    transports) {
1342 		/* Skip uninteresting transports. */
1343 		if (trans->state == SCTP_INACTIVE ||
1344 		    trans->state == SCTP_UNCONFIRMED)
1345 			continue;
1346 		/* Keep track of the best PF transport from our
1347 		 * list in case we don't find an active one.
1348 		 */
1349 		if (trans->state == SCTP_PF) {
1350 			trans_pf = sctp_trans_elect_best(trans, trans_pf);
1351 			continue;
1352 		}
1353 		/* For active transports, pick the most recent ones. */
1354 		if (trans_pri == NULL ||
1355 		    ktime_after(trans->last_time_heard,
1356 				trans_pri->last_time_heard)) {
1357 			trans_sec = trans_pri;
1358 			trans_pri = trans;
1359 		} else if (trans_sec == NULL ||
1360 			   ktime_after(trans->last_time_heard,
1361 				       trans_sec->last_time_heard)) {
1362 			trans_sec = trans;
1363 		}
1364 	}
1365 
1366 	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1367 	 *
1368 	 * By default, an endpoint should always transmit to the primary
1369 	 * path, unless the SCTP user explicitly specifies the
1370 	 * destination transport address (and possibly source transport
1371 	 * address) to use. [If the primary is active but not most recent,
1372 	 * bump the most recently used transport.]
1373 	 */
1374 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1375 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1376 	     asoc->peer.primary_path != trans_pri) {
1377 		trans_sec = trans_pri;
1378 		trans_pri = asoc->peer.primary_path;
1379 	}
1380 
1381 	/* We did not find anything useful for a possible retransmission
1382 	 * path; either the primary path that we found is the same as
1383 	 * the current one, or we generally didn't find an active one.
1384 	 */
1385 	if (trans_sec == NULL)
1386 		trans_sec = trans_pri;
1387 
1388 	/* If we failed to find a usable transport, just camp on the
1389 	 * active or pick a PF iff it's the better choice.
1390 	 */
1391 	if (trans_pri == NULL) {
1392 		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1393 		trans_sec = trans_pri;
1394 	}
1395 
1396 	/* Set the active and retran transports. */
1397 	asoc->peer.active_path = trans_pri;
1398 	asoc->peer.retran_path = trans_sec;
1399 }
1400 
1401 struct sctp_transport *
1402 sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1403 				  struct sctp_transport *last_sent_to)
1404 {
1405 	/* If this is the first time packet is sent, use the active path,
1406 	 * else use the retran path. If the last packet was sent over the
1407 	 * retran path, update the retran path and use it.
1408 	 */
1409 	if (last_sent_to == NULL) {
1410 		return asoc->peer.active_path;
1411 	} else {
1412 		if (last_sent_to == asoc->peer.retran_path)
1413 			sctp_assoc_update_retran_path(asoc);
1414 
1415 		return asoc->peer.retran_path;
1416 	}
1417 }
1418 
1419 void sctp_assoc_update_frag_point(struct sctp_association *asoc)
1420 {
1421 	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
1422 				    sctp_datachk_len(&asoc->stream));
1423 
1424 	if (asoc->user_frag)
1425 		frag = min_t(int, frag, asoc->user_frag);
1426 
1427 	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
1428 				sctp_datachk_len(&asoc->stream));
1429 
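	/* SCTP_TRUNC4() rounds the frag point down to a multiple of 4,
	 * since chunks are padded to 4-byte boundaries on the wire.
	 */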
1430 	asoc->frag_point = SCTP_TRUNC4(frag);
1431 }
1432 
1433 void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
1434 {
1435 	if (asoc->pathmtu != pmtu) {
1436 		asoc->pathmtu = pmtu;
1437 		sctp_assoc_update_frag_point(asoc);
1438 	}
1439 
1440 	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1441 		 asoc->pathmtu, asoc->frag_point);
1442 }
1443 
1444 /* Update the association's pmtu and frag_point by going through all the
1445  * transports. This routine is called when a transport's PMTU has changed.
1446  */
1447 void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1448 {
1449 	struct sctp_transport *t;
1450 	__u32 pmtu = 0;
1451 
1452 	if (!asoc)
1453 		return;
1454 
1455 	/* Get the lowest pmtu of all the transports. */
1456 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
1457 		if (t->pmtu_pending && t->dst) {
1458 			sctp_transport_update_pmtu(t,
1459 						   atomic_read(&t->mtu_info));
1460 			t->pmtu_pending = 0;
1461 		}
1462 		if (!pmtu || (t->pathmtu < pmtu))
1463 			pmtu = t->pathmtu;
1464 	}
1465 
1466 	sctp_assoc_set_pmtu(asoc, pmtu);
1467 }
1468 
1469 /* Should we send a SACK to update our peer? */
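/* For instance, assuming the default rwnd_upd_shift of 4, an update is
 * due once the window has grown by at least max(sk_rcvbuf / 16, one
 * PMTU) beyond the last advertised value (a_rwnd).
 */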
1470 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1471 {
1472 	struct net *net = sock_net(asoc->base.sk);
1473 	switch (asoc->state) {
1474 	case SCTP_STATE_ESTABLISHED:
1475 	case SCTP_STATE_SHUTDOWN_PENDING:
1476 	case SCTP_STATE_SHUTDOWN_RECEIVED:
1477 	case SCTP_STATE_SHUTDOWN_SENT:
1478 		if ((asoc->rwnd > asoc->a_rwnd) &&
1479 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1480 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1481 			   asoc->pathmtu)))
1482 			return true;
1483 		break;
1484 	default:
1485 		break;
1486 	}
1487 	return false;
1488 }
1489 
1490 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1491 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1492 {
1493 	struct sctp_chunk *sack;
1494 	struct timer_list *timer;
1495 
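	/* Credit the increase against any outstanding overage first; only
	 * what remains actually reopens the advertised window.
	 */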
1496 	if (asoc->rwnd_over) {
1497 		if (asoc->rwnd_over >= len) {
1498 			asoc->rwnd_over -= len;
1499 		} else {
1500 			asoc->rwnd += (len - asoc->rwnd_over);
1501 			asoc->rwnd_over = 0;
1502 		}
1503 	} else {
1504 		asoc->rwnd += len;
1505 	}
1506 
1507 	/* If we had window pressure, start recovering it
1508 	 * once our rwnd had reached the accumulated pressure
1509 	 * threshold.  The idea is to recover slowly, but up
1510 	 * to the initial advertised window.
1511 	 */
1512 	if (asoc->rwnd_press) {
1513 		int change = min(asoc->pathmtu, asoc->rwnd_press);
1514 		asoc->rwnd += change;
1515 		asoc->rwnd_press -= change;
1516 	}
1517 
1518 	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1519 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1520 		 asoc->a_rwnd);
1521 
1522 	/* Send a window update SACK if the rwnd has increased by at least the
1523 	 * larger of the association's PMTU and a fraction of the receive
1524 	 * buffer (see sctp_peer_needs_update() above).
1524 	 * The algorithm used is similar to the one described in
1525 	 * Section 4.2.3.3 of RFC 1122.
1526 	 */
1527 	if (sctp_peer_needs_update(asoc)) {
1528 		asoc->a_rwnd = asoc->rwnd;
1529 
1530 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1531 			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1532 			 asoc->a_rwnd);
1533 
1534 		sack = sctp_make_sack(asoc);
1535 		if (!sack)
1536 			return;
1537 
1538 		asoc->peer.sack_needed = 0;
1539 
1540 		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
1541 
1542 		/* Stop the SACK timer.  */
1543 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1544 		if (del_timer(timer))
1545 			sctp_association_put(asoc);
1546 	}
1547 }
1548 
1549 /* Decrease asoc's rwnd by len. */
1550 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1551 {
1552 	int rx_count;
1553 	int over = 0;
1554 
1555 	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1556 		pr_debug("%s: association:%p has asoc->rwnd:%u, "
1557 			 "asoc->rwnd_over:%u!\n", __func__, asoc,
1558 			 asoc->rwnd, asoc->rwnd_over);
1559 
1560 	if (asoc->ep->rcvbuf_policy)
1561 		rx_count = atomic_read(&asoc->rmem_alloc);
1562 	else
1563 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1564 
1565 	/* If we've reached or overflowed our receive buffer, announce
1566 	 * a 0 rwnd if rwnd would still be positive.  Store the
1567 	 * potential pressure overflow so that the window can be restored
1568 	 * back to its original value.
1569 	 */
1570 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1571 		over = 1;
1572 
1573 	if (asoc->rwnd >= len) {
1574 		asoc->rwnd -= len;
1575 		if (over) {
1576 			asoc->rwnd_press += asoc->rwnd;
1577 			asoc->rwnd = 0;
1578 		}
1579 	} else {
1580 		asoc->rwnd_over += len - asoc->rwnd;
1581 		asoc->rwnd = 0;
1582 	}
1583 
1584 	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1585 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1586 		 asoc->rwnd_press);
1587 }
1588 
1589 /* Build the bind address list for the association based on info from the
1590  * local endpoint and the remote peer.
1591  */
1592 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1593 				     enum sctp_scope scope, gfp_t gfp)
1594 {
1595 	int flags;
1596 
1597 	/* Use scoping rules to determine the subset of addresses from
1598 	 * the endpoint.
1599 	 */
1600 	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1601 	if (asoc->peer.ipv4_address)
1602 		flags |= SCTP_ADDR4_PEERSUPP;
1603 	if (asoc->peer.ipv6_address)
1604 		flags |= SCTP_ADDR6_PEERSUPP;
1605 
1606 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1607 				   &asoc->base.bind_addr,
1608 				   &asoc->ep->base.bind_addr,
1609 				   scope, gfp, flags);
1610 }
1611 
1612 /* Build the association's bind address list from the cookie.  */
1613 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1614 					 struct sctp_cookie *cookie,
1615 					 gfp_t gfp)
1616 {
1617 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1618 	int var_size3 = cookie->raw_addr_list_len;
1619 	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1620 
1621 	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1622 				      asoc->ep->base.bind_addr.port, gfp);
1623 }
1624 
1625 /* Lookup laddr in the bind address list of an association. */
1626 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1627 			    const union sctp_addr *laddr)
1628 {
1629 	int found = 0;
1630 
1631 	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1632 	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1633 				 sctp_sk(asoc->base.sk)))
1634 		found = 1;
1635 
1636 	return found;
1637 }
1638 
1639 /* Set an association id for a given association */
1640 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1641 {
1642 	bool preload = gfpflags_allow_blocking(gfp);
1643 	int ret;
1644 
1645 	/* If the id is already assigned, keep it. */
1646 	if (asoc->assoc_id)
1647 		return 0;
1648 
1649 	if (preload)
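	/* idr_alloc_cyclic() below runs under a spinlock and must not
	 * sleep; preload the IDR with the caller's gfp mask while blocking
	 * is still allowed, then allocate with GFP_NOWAIT inside the lock.
	 */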
1650 		idr_preload(gfp);
1651 	spin_lock_bh(&sctp_assocs_id_lock);
1652 	/* 0 is not a valid assoc_id, must be >= 1 */
1653 	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1654 	spin_unlock_bh(&sctp_assocs_id_lock);
1655 	if (preload)
1656 		idr_preload_end();
1657 	if (ret < 0)
1658 		return ret;
1659 
1660 	asoc->assoc_id = (sctp_assoc_t)ret;
1661 	return 0;
1662 }
1663 
1664 /* Free the ASCONF queue */
1665 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1666 {
1667 	struct sctp_chunk *asconf;
1668 	struct sctp_chunk *tmp;
1669 
1670 	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1671 		list_del_init(&asconf->list);
1672 		sctp_chunk_free(asconf);
1673 	}
1674 }
1675 
1676 /* Free asconf_ack cache */
1677 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1678 {
1679 	struct sctp_chunk *ack;
1680 	struct sctp_chunk *tmp;
1681 
1682 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1683 				transmitted_list) {
1684 		list_del_init(&ack->transmitted_list);
1685 		sctp_chunk_free(ack);
1686 	}
1687 }
1688 
1689 /* Clean up the ASCONF_ACK queue */
1690 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1691 {
1692 	struct sctp_chunk *ack;
1693 	struct sctp_chunk *tmp;
1694 
1695 	/* We can remove all the entries from the queue up to
1696 	 * the "Peer-Sequence-Number".
1697 	 */
1698 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1699 				transmitted_list) {
1700 		if (ack->subh.addip_hdr->serial ==
1701 				htonl(asoc->peer.addip_serial))
1702 			break;
1703 
1704 		list_del_init(&ack->transmitted_list);
1705 		sctp_chunk_free(ack);
1706 	}
1707 }
1708 
1709 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1710 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1711 					const struct sctp_association *asoc,
1712 					__be32 serial)
1713 {
1714 	struct sctp_chunk *ack;
1715 
1716 	/* Walk through the list of cached ASCONF-ACKs and find the
1717 	 * ack chunk whose serial number matches that of the request.
1718 	 */
1719 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1720 		if (sctp_chunk_pending(ack))
1721 			continue;
1722 		if (ack->subh.addip_hdr->serial == serial) {
1723 			sctp_chunk_hold(ack);
1724 			return ack;
1725 		}
1726 	}
1727 
1728 	return NULL;
1729 }
1730 
1731 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1732 {
1733 	/* Free any cached ASCONF_ACK chunk. */
1734 	sctp_assoc_free_asconf_acks(asoc);
1735 
1736 	/* Free the ASCONF queue. */
1737 	sctp_assoc_free_asconf_queue(asoc);
1738 
1739 	/* Free any cached ASCONF chunk. */
1740 	if (asoc->addip_last_asconf)
1741 		sctp_chunk_free(asoc->addip_last_asconf);
1742 }
1743