xref: /openbmc/linux/net/sctp/associola.c (revision a8a28aff)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 La Monte H.P. Yarroll
7  *
8  * This file is part of the SCTP kernel implementation
9  *
10  * This module provides the abstraction for an SCTP association.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Jon Grimm             <jgrimm@us.ibm.com>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang             <hui.huang@nokia.com>
38  *    Sridhar Samudrala	    <sri@us.ibm.com>
39  *    Daisy Chang	    <daisyc@us.ibm.com>
40  *    Ryan Layer	    <rmlayer@us.ibm.com>
41  *    Kevin Gao             <kevin.gao@intel.com>
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/types.h>
47 #include <linux/fcntl.h>
48 #include <linux/poll.h>
49 #include <linux/init.h>
50 
51 #include <linux/slab.h>
52 #include <linux/in.h>
53 #include <net/ipv6.h>
54 #include <net/sctp/sctp.h>
55 #include <net/sctp/sm.h>
56 
57 /* Forward declarations for internal functions. */
58 static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59 static void sctp_assoc_bh_rcv(struct work_struct *work);
60 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62 
63 /* 1st Level Abstractions. */
64 
65 /* Initialize a new association from provided memory. */
66 static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
67 					  const struct sctp_endpoint *ep,
68 					  const struct sock *sk,
69 					  sctp_scope_t scope,
70 					  gfp_t gfp)
71 {
72 	struct net *net = sock_net(sk);
73 	struct sctp_sock *sp;
74 	int i;
75 	sctp_paramhdr_t *p;
76 	int err;
77 
78 	/* Retrieve the SCTP per socket area.  */
79 	sp = sctp_sk((struct sock *)sk);
80 
81 	/* Discarding const is appropriate here.  */
82 	asoc->ep = (struct sctp_endpoint *)ep;
83 	asoc->base.sk = (struct sock *)sk;
84 
85 	sctp_endpoint_hold(asoc->ep);
86 	sock_hold(asoc->base.sk);
87 
88 	/* Initialize the common base substructure.  */
89 	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
90 
91 	/* Initialize the object handling fields.  */
92 	atomic_set(&asoc->base.refcnt, 1);
93 
94 	/* Initialize the bind addr area.  */
95 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
96 
97 	asoc->state = SCTP_STATE_CLOSED;
98 	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
99 	asoc->user_frag = sp->user_frag;
100 
101 	/* Set the association max_retrans and RTO values from the
102 	 * socket values.
103 	 */
104 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
105 	asoc->pf_retrans  = net->sctp.pf_retrans;
106 
107 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
108 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
109 	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
110 
111 	/* Initialize the association's heartbeat interval based on the
112 	 * sock configured value.
113 	 */
114 	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
115 
116 	/* Initialize path max retrans value. */
117 	asoc->pathmaxrxt = sp->pathmaxrxt;
118 
119 	/* Initialize default path MTU. */
120 	asoc->pathmtu = sp->pathmtu;
121 
122 	/* Set association default SACK delay */
123 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
124 	asoc->sackfreq = sp->sackfreq;
125 
126 	/* Set the association default flags controlling
127 	 * Heartbeat, SACK delay, and Path MTU Discovery.
128 	 */
129 	asoc->param_flags = sp->param_flags;
130 
131 	/* Initialize the maximum number of new data packets that can be sent
132 	 * in a burst.
133 	 */
134 	asoc->max_burst = sp->max_burst;
135 
136 	/* initialize association timers */
137 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
138 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
139 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
140 
141 	/* sctpimpguide Section 2.12.2
142 	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
143 	 * recommended value of 5 times 'RTO.Max'.
144 	 */
145 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
146 		= 5 * asoc->rto_max;
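	/* For example, with the suggested protocol default of RTO.Max = 60
	 * seconds, this arms the shutdown guard timer at 5 * 60s = 300 seconds.
	 */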
147 
148 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
149 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
150 
151 	/* Initializes the timers */
152 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
153 		setup_timer(&asoc->timers[i], sctp_timer_events[i],
154 				(unsigned long)asoc);
155 
156 	/* Pull default initialization values from the sock options.
157 	 * Note: This assumes that the values have already been
158 	 * validated in the sock.
159 	 */
160 	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
161 	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
162 	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
163 
164 	asoc->max_init_timeo =
165 		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
166 
167 	/* Set the local window size for receive.
168 	 * This is also the rcvbuf space per association.
169 	 * RFC 2960 6.1 - A SCTP receiver MUST be able to receive a minimum of
170 	 * 1500 bytes in one SCTP packet.
171 	 */
172 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
173 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
174 	else
175 		asoc->rwnd = sk->sk_rcvbuf/2;
176 
177 	asoc->a_rwnd = asoc->rwnd;
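	/* For example, a 64 KiB sk_rcvbuf yields an initial rwnd (and
	 * advertised a_rwnd) of 32 KiB, while a very small receive buffer is
	 * clamped up to SCTP_DEFAULT_MINWINDOW so that at least one full
	 * 1500-byte packet can still be received.
	 */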
178 
179 	/* Use my own max window until I learn something better.  */
180 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
181 
182 	/* Initialize the receive memory counter */
183 	atomic_set(&asoc->rmem_alloc, 0);
184 
185 	init_waitqueue_head(&asoc->wait);
186 
187 	asoc->c.my_vtag = sctp_generate_tag(ep);
188 	asoc->c.my_port = ep->base.bind_addr.port;
189 
190 	asoc->c.initial_tsn = sctp_generate_tsn(ep);
191 
192 	asoc->next_tsn = asoc->c.initial_tsn;
193 
194 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
195 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
196 	asoc->highest_sacked = asoc->ctsn_ack_point;
197 	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
198 
199 	/* ADDIP Section 4.1 Asconf Chunk Procedures
200 	 *
201 	 * When an endpoint has an ASCONF signaled change to be sent to the
202 	 * remote endpoint it should do the following:
203 	 * ...
204 	 * A2) a serial number should be assigned to the chunk. The serial
205 	 * number SHOULD be a monotonically increasing number. The serial
206 	 * numbers SHOULD be initialized at the start of the
207 	 * association to the same value as the initial TSN.
208 	 */
209 	asoc->addip_serial = asoc->c.initial_tsn;
210 
211 	INIT_LIST_HEAD(&asoc->addip_chunk_list);
212 	INIT_LIST_HEAD(&asoc->asconf_ack_list);
213 
214 	/* Make an empty list of remote transport addresses.  */
215 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
216 
217 	/* RFC 2960 5.1 Normal Establishment of an Association
218 	 *
219 	 * After the reception of the first data chunk in an
220 	 * association the endpoint must immediately respond with a
221 	 * sack to acknowledge the data chunk.  Subsequent
222 	 * acknowledgements should be done as described in Section
223 	 * 6.2.
224 	 *
225 	 * [We implement this by telling a new association that it
226 	 * already received one packet.]
227 	 */
228 	asoc->peer.sack_needed = 1;
229 	asoc->peer.sack_generation = 1;
230 
231 	/* Assume that the peer will tell us whether it recognizes ASCONF
232 	 * as part of the INIT exchange.
233 	 * The sctp_addip_noauth option is there for backward compatibility
234 	 * and reverts to the old behavior.
235 	 */
236 	if (net->sctp.addip_noauth)
237 		asoc->peer.asconf_capable = 1;
238 
239 	/* Create an input queue.  */
240 	sctp_inq_init(&asoc->base.inqueue);
241 	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
242 
243 	/* Create an output queue.  */
244 	sctp_outq_init(asoc, &asoc->outqueue);
245 
246 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 		goto fail_init;
248 
249 	/* Assume that peer would support both address types unless we are
250 	 * told otherwise.
251 	 */
252 	asoc->peer.ipv4_address = 1;
253 	if (asoc->base.sk->sk_family == PF_INET6)
254 		asoc->peer.ipv6_address = 1;
255 	INIT_LIST_HEAD(&asoc->asocs);
256 
257 	asoc->default_stream = sp->default_stream;
258 	asoc->default_ppid = sp->default_ppid;
259 	asoc->default_flags = sp->default_flags;
260 	asoc->default_context = sp->default_context;
261 	asoc->default_timetolive = sp->default_timetolive;
262 	asoc->default_rcv_context = sp->default_rcv_context;
263 
264 	/* AUTH related initializations */
265 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
266 	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
267 	if (err)
268 		goto fail_init;
269 
270 	asoc->active_key_id = ep->active_key_id;
271 
272 	/* Save the hmacs and chunks list into this association */
273 	if (ep->auth_hmacs_list)
274 		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
275 			ntohs(ep->auth_hmacs_list->param_hdr.length));
276 	if (ep->auth_chunk_list)
277 		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
278 			ntohs(ep->auth_chunk_list->param_hdr.length));
279 
280 	/* Get the AUTH random number for this association */
281 	p = (sctp_paramhdr_t *)asoc->c.auth_random;
282 	p->type = SCTP_PARAM_RANDOM;
283 	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
284 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
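	/* Assuming the usual 32-byte SCTP_AUTH_RANDOM_LENGTH, the RANDOM
	 * parameter built here is a 4-byte parameter header followed by
	 * 32 bytes of key material, i.e. p->length == htons(36).
	 */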
285 
286 	return asoc;
287 
288 fail_init:
289 	sock_put(asoc->base.sk);
290 	sctp_endpoint_put(asoc->ep);
291 	return NULL;
292 }
293 
294 /* Allocate and initialize a new association */
295 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
296 					 const struct sock *sk,
297 					 sctp_scope_t scope,
298 					 gfp_t gfp)
299 {
300 	struct sctp_association *asoc;
301 
302 	asoc = kzalloc(sizeof(*asoc), gfp);
303 	if (!asoc)
304 		goto fail;
305 
306 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
307 		goto fail_init;
308 
309 	SCTP_DBG_OBJCNT_INC(assoc);
310 
311 	pr_debug("Created asoc %p\n", asoc);
312 
313 	return asoc;
314 
315 fail_init:
316 	kfree(asoc);
317 fail:
318 	return NULL;
319 }
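/* A minimal sketch of how user space typically reaches this allocation path,
 * assuming a one-to-one style SCTP socket (addresses and port below are
 * illustrative only):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port = htons(5000) };
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * The connect() above ends up creating the association on the kernel side
 * via sctp_association_new().
 */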
320 
321 /* Free this association if possible.  There may still be users, so
322  * the actual deallocation may be delayed.
323  */
324 void sctp_association_free(struct sctp_association *asoc)
325 {
326 	struct sock *sk = asoc->base.sk;
327 	struct sctp_transport *transport;
328 	struct list_head *pos, *temp;
329 	int i;
330 
331 	/* Only real associations count against the endpoint, so
332 	 * don't bother if this is a temporary association.
333 	 */
334 	if (!list_empty(&asoc->asocs)) {
335 		list_del(&asoc->asocs);
336 
337 		/* Decrement the backlog value for a TCP-style listening
338 		 * socket.
339 		 */
340 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
341 			sk->sk_ack_backlog--;
342 	}
343 
344 	/* Mark as dead, so other users can know this structure is
345 	 * going away.
346 	 */
347 	asoc->base.dead = true;
348 
349 	/* Dispose of any data lying around in the outqueue. */
350 	sctp_outq_free(&asoc->outqueue);
351 
352 	/* Dispose of any pending messages for the upper layer. */
353 	sctp_ulpq_free(&asoc->ulpq);
354 
355 	/* Dispose of any pending chunks on the inqueue. */
356 	sctp_inq_free(&asoc->base.inqueue);
357 
358 	sctp_tsnmap_free(&asoc->peer.tsn_map);
359 
360 	/* Free ssnmap storage. */
361 	sctp_ssnmap_free(asoc->ssnmap);
362 
363 	/* Clean up the bound address list. */
364 	sctp_bind_addr_free(&asoc->base.bind_addr);
365 
366 	/* Do we need to go through all of our timers and
367 	 * delete them?  To be safe we will try to delete them all, but we
368 	 * should be able to go through and make a guess based
369 	 * on our state.
370 	 */
371 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
372 		if (del_timer(&asoc->timers[i]))
373 			sctp_association_put(asoc);
374 	}
375 
376 	/* Free peer's cached cookie. */
377 	kfree(asoc->peer.cookie);
378 	kfree(asoc->peer.peer_random);
379 	kfree(asoc->peer.peer_chunks);
380 	kfree(asoc->peer.peer_hmacs);
381 
382 	/* Release the transport structures. */
383 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
384 		transport = list_entry(pos, struct sctp_transport, transports);
385 		list_del_rcu(pos);
386 		sctp_transport_free(transport);
387 	}
388 
389 	asoc->peer.transport_count = 0;
390 
391 	sctp_asconf_queue_teardown(asoc);
392 
393 	/* Free pending address space being deleted */
394 	kfree(asoc->asconf_addr_del_pending);	/* kfree(NULL) is a no-op */
396 
397 	/* AUTH - Free the endpoint shared keys */
398 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
399 
400 	/* AUTH - Free the association shared key */
401 	sctp_auth_key_put(asoc->asoc_shared_key);
402 
403 	sctp_association_put(asoc);
404 }
405 
406 /* Cleanup and free up an association. */
407 static void sctp_association_destroy(struct sctp_association *asoc)
408 {
409 	if (unlikely(!asoc->base.dead)) {
410 		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
411 		return;
412 	}
413 
414 	sctp_endpoint_put(asoc->ep);
415 	sock_put(asoc->base.sk);
416 
417 	if (asoc->assoc_id != 0) {
418 		spin_lock_bh(&sctp_assocs_id_lock);
419 		idr_remove(&sctp_assocs_id, asoc->assoc_id);
420 		spin_unlock_bh(&sctp_assocs_id_lock);
421 	}
422 
423 	WARN_ON(atomic_read(&asoc->rmem_alloc));
424 
425 	kfree(asoc);
426 	SCTP_DBG_OBJCNT_DEC(assoc);
427 }
428 
429 /* Change the primary destination address for the peer. */
430 void sctp_assoc_set_primary(struct sctp_association *asoc,
431 			    struct sctp_transport *transport)
432 {
433 	int changeover = 0;
434 
435 	/* it's a changeover only if we already have a primary path
436 	 * that we are changing
437 	 */
438 	if (asoc->peer.primary_path != NULL &&
439 	    asoc->peer.primary_path != transport)
440 		changeover = 1;
441 
442 	asoc->peer.primary_path = transport;
443 
444 	/* Set a default msg_name for events. */
445 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
446 	       sizeof(union sctp_addr));
447 
448 	/* If the primary path is changing, assume that the
449 	 * user wants to use this new path.
450 	 */
451 	if ((transport->state == SCTP_ACTIVE) ||
452 	    (transport->state == SCTP_UNKNOWN))
453 		asoc->peer.active_path = transport;
454 
455 	/*
456 	 * SFR-CACC algorithm:
457 	 * Upon the receipt of a request to change the primary
458 	 * destination address, on the data structure for the new
459 	 * primary destination, the sender MUST do the following:
460 	 *
461 	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
462 	 * to this destination address earlier. The sender MUST set
463 	 * CYCLING_CHANGEOVER to indicate that this switch is a
464 	 * double switch to the same destination address.
465 	 *
466 	 * Really, only bother if we have data queued or outstanding on
467 	 * the association.
468 	 */
469 	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
470 		return;
471 
472 	if (transport->cacc.changeover_active)
473 		transport->cacc.cycling_changeover = changeover;
474 
475 	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
476 	 * a changeover has occurred.
477 	 */
478 	transport->cacc.changeover_active = changeover;
479 
480 	/* 3) The sender MUST store the next TSN to be sent in
481 	 * next_tsn_at_change.
482 	 */
483 	transport->cacc.next_tsn_at_change = asoc->next_tsn;
484 }
485 
486 /* Remove a transport from an association.  */
487 void sctp_assoc_rm_peer(struct sctp_association *asoc,
488 			struct sctp_transport *peer)
489 {
490 	struct list_head	*pos;
491 	struct sctp_transport	*transport;
492 
493 	pr_debug("%s: association:%p addr:%pISpc\n",
494 		 __func__, asoc, &peer->ipaddr.sa);
495 
496 	/* If we are to remove the current retran_path, update it
497 	 * to the next peer before removing this peer from the list.
498 	 */
499 	if (asoc->peer.retran_path == peer)
500 		sctp_assoc_update_retran_path(asoc);
501 
502 	/* Remove this peer from the list. */
503 	list_del_rcu(&peer->transports);
504 
505 	/* Get the first transport of asoc. */
506 	pos = asoc->peer.transport_addr_list.next;
507 	transport = list_entry(pos, struct sctp_transport, transports);
508 
509 	/* Update any entries that match the peer to be deleted. */
510 	if (asoc->peer.primary_path == peer)
511 		sctp_assoc_set_primary(asoc, transport);
512 	if (asoc->peer.active_path == peer)
513 		asoc->peer.active_path = transport;
514 	if (asoc->peer.retran_path == peer)
515 		asoc->peer.retran_path = transport;
516 	if (asoc->peer.last_data_from == peer)
517 		asoc->peer.last_data_from = transport;
518 
519 	/* If we remove the transport an INIT was last sent to, set it to
520 	 * NULL. Combined with the update of the retran path above, this
521 	 * will cause the next INIT to be sent to the next available
522 	 * transport, maintaining the cycle.
523 	 */
524 	if (asoc->init_last_sent_to == peer)
525 		asoc->init_last_sent_to = NULL;
526 
527 	/* If we remove the transport an SHUTDOWN was last sent to, set it
528 	 * to NULL. Combined with the update of the retran path above, this
529 	 * will cause the next SHUTDOWN to be sent to the next available
530 	 * transport, maintaining the cycle.
531 	 */
532 	if (asoc->shutdown_last_sent_to == peer)
533 		asoc->shutdown_last_sent_to = NULL;
534 
535 	/* If we remove the transport an ASCONF was last sent to, set it to
536 	 * NULL.
537 	 */
538 	if (asoc->addip_last_asconf &&
539 	    asoc->addip_last_asconf->transport == peer)
540 		asoc->addip_last_asconf->transport = NULL;
541 
542 	/* If we have something on the transmitted list, we have to
543 	 * save it off.  The best place is the active path.
544 	 */
545 	if (!list_empty(&peer->transmitted)) {
546 		struct sctp_transport *active = asoc->peer.active_path;
547 		struct sctp_chunk *ch;
548 
549 		/* Reset the transport of each chunk on this list */
550 		list_for_each_entry(ch, &peer->transmitted,
551 					transmitted_list) {
552 			ch->transport = NULL;
553 			ch->rtt_in_progress = 0;
554 		}
555 
556 		list_splice_tail_init(&peer->transmitted,
557 					&active->transmitted);
558 
559 		/* Start a T3 timer here in case it wasn't running so
560 		 * that these migrated packets have a chance to get
561 		 * retransmitted.
562 		 */
563 		if (!timer_pending(&active->T3_rtx_timer))
564 			if (!mod_timer(&active->T3_rtx_timer,
565 					jiffies + active->rto))
566 				sctp_transport_hold(active);
567 	}
568 
569 	asoc->peer.transport_count--;
570 
571 	sctp_transport_free(peer);
572 }
573 
574 /* Add a transport address to an association.  */
575 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
576 					   const union sctp_addr *addr,
577 					   const gfp_t gfp,
578 					   const int peer_state)
579 {
580 	struct net *net = sock_net(asoc->base.sk);
581 	struct sctp_transport *peer;
582 	struct sctp_sock *sp;
583 	unsigned short port;
584 
585 	sp = sctp_sk(asoc->base.sk);
586 
587 	/* AF_INET and AF_INET6 share common port field. */
588 	port = ntohs(addr->v4.sin_port);
589 
590 	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
591 		 asoc, &addr->sa, peer_state);
592 
593 	/* Set the port if it has not been set yet.  */
594 	if (0 == asoc->peer.port)
595 		asoc->peer.port = port;
596 
597 	/* Check to see if this is a duplicate. */
598 	peer = sctp_assoc_lookup_paddr(asoc, addr);
599 	if (peer) {
600 		/* An UNKNOWN state is only set on transports added by
601 		 * user in sctp_connectx() call.  Such transports should be
602 		 * considered CONFIRMED per RFC 4960, Section 5.4.
603 		 */
604 		if (peer->state == SCTP_UNKNOWN) {
605 			peer->state = SCTP_ACTIVE;
606 		}
607 		return peer;
608 	}
609 
610 	peer = sctp_transport_new(net, addr, gfp);
611 	if (!peer)
612 		return NULL;
613 
614 	sctp_transport_set_owner(peer, asoc);
615 
616 	/* Initialize the peer's heartbeat interval based on the
617 	 * association configured value.
618 	 */
619 	peer->hbinterval = asoc->hbinterval;
620 
621 	/* Set the path max_retrans.  */
622 	peer->pathmaxrxt = asoc->pathmaxrxt;
623 
624 	/* And the partial failure retrans threshold */
625 	peer->pf_retrans = asoc->pf_retrans;
626 
627 	/* Initialize the peer's SACK delay timeout based on the
628 	 * association configured value.
629 	 */
630 	peer->sackdelay = asoc->sackdelay;
631 	peer->sackfreq = asoc->sackfreq;
632 
633 	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
634 	 * based on association setting.
635 	 */
636 	peer->param_flags = asoc->param_flags;
637 
638 	sctp_transport_route(peer, NULL, sp);
639 
640 	/* Initialize the pmtu of the transport. */
641 	if (peer->param_flags & SPP_PMTUD_DISABLE) {
642 		if (asoc->pathmtu)
643 			peer->pathmtu = asoc->pathmtu;
644 		else
645 			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
646 	}
647 
648 	/* If this is the first transport addr on this association,
649 	 * initialize the association PMTU to the peer's PMTU.
650 	 * If not and the current association PMTU is higher than the new
651 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
652 	 */
653 	if (asoc->pathmtu)
654 		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
655 	else
656 		asoc->pathmtu = peer->pathmtu;
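	/* For example, if the association PMTU is currently 1500 and the new
	 * peer transport reports 1400, the association PMTU drops to 1400; a
	 * larger peer PMTU never raises the association value here.
	 */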
657 
658 	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
659 		 asoc->pathmtu);
660 
661 	peer->pmtu_pending = 0;
662 
663 	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
664 
665 	/* The asoc->peer.port might not be meaningful yet, but
666 	 * initialize the packet structure anyway.
667 	 */
668 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
669 			 asoc->peer.port);
670 
671 	/* 7.2.1 Slow-Start
672 	 *
673 	 * o The initial cwnd before DATA transmission or after a sufficiently
674 	 *   long idle period MUST be set to
675 	 *      min(4*MTU, max(2*MTU, 4380 bytes))
676 	 *
677 	 * o The initial value of ssthresh MAY be arbitrarily high
678 	 *   (for example, implementations MAY use the size of the
679 	 *   receiver advertised window).
680 	 */
681 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
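	/* E.g. with a 1500-byte path MTU this evaluates to
	 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes.
	 */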
682 
683 	/* At this point, we may not have the receiver's advertised window,
684 	 * so initialize ssthresh to the default value and it will be set
685 	 * later when we process the INIT.
686 	 */
687 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
688 
689 	peer->partial_bytes_acked = 0;
690 	peer->flight_size = 0;
691 	peer->burst_limited = 0;
692 
693 	/* Set the transport's RTO.initial value */
694 	peer->rto = asoc->rto_initial;
695 	sctp_max_rto(asoc, peer);
696 
697 	/* Set the peer's active state. */
698 	peer->state = peer_state;
699 
700 	/* Attach the remote transport to our asoc.  */
701 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
702 	asoc->peer.transport_count++;
703 
704 	/* If we do not yet have a primary path, set one.  */
705 	if (!asoc->peer.primary_path) {
706 		sctp_assoc_set_primary(asoc, peer);
707 		asoc->peer.retran_path = peer;
708 	}
709 
710 	if (asoc->peer.active_path == asoc->peer.retran_path &&
711 	    peer->state != SCTP_UNCONFIRMED) {
712 		asoc->peer.retran_path = peer;
713 	}
714 
715 	return peer;
716 }
717 
718 /* Delete a transport address from an association.  */
719 void sctp_assoc_del_peer(struct sctp_association *asoc,
720 			 const union sctp_addr *addr)
721 {
722 	struct list_head	*pos;
723 	struct list_head	*temp;
724 	struct sctp_transport	*transport;
725 
726 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
727 		transport = list_entry(pos, struct sctp_transport, transports);
728 		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
729 			/* Do bookkeeping for removing the peer and free it. */
730 			sctp_assoc_rm_peer(asoc, transport);
731 			break;
732 		}
733 	}
734 }
735 
736 /* Lookup a transport by address. */
737 struct sctp_transport *sctp_assoc_lookup_paddr(
738 					const struct sctp_association *asoc,
739 					const union sctp_addr *address)
740 {
741 	struct sctp_transport *t;
742 
743 	/* Cycle through all transports searching for a peer address. */
744 
745 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
746 			transports) {
747 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
748 			return t;
749 	}
750 
751 	return NULL;
752 }
753 
754 /* Remove all transports except a given one */
755 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
756 				     struct sctp_transport *primary)
757 {
758 	struct sctp_transport	*temp;
759 	struct sctp_transport	*t;
760 
761 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
762 				 transports) {
763 		/* if the current transport is not the primary one, delete it */
764 		if (t != primary)
765 			sctp_assoc_rm_peer(asoc, t);
766 	}
767 }
768 
769 /* Engage in transport control operations.
770  * Mark the transport up or down and send a notification to the user.
771  * Select and update the new active and retran paths.
772  */
773 void sctp_assoc_control_transport(struct sctp_association *asoc,
774 				  struct sctp_transport *transport,
775 				  sctp_transport_cmd_t command,
776 				  sctp_sn_error_t error)
777 {
778 	struct sctp_ulpevent *event;
779 	struct sockaddr_storage addr;
780 	int spc_state = 0;
781 	bool ulp_notify = true;
782 
783 	/* Record the transition on the transport.  */
784 	switch (command) {
785 	case SCTP_TRANSPORT_UP:
786 		/* If we are moving from UNCONFIRMED state due
787 		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
788 		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
789 		 */
790 		if (SCTP_UNCONFIRMED == transport->state &&
791 		    SCTP_HEARTBEAT_SUCCESS == error)
792 			spc_state = SCTP_ADDR_CONFIRMED;
793 		else
794 			spc_state = SCTP_ADDR_AVAILABLE;
795 		/* Don't inform ULP about transition from PF to
796 		 * active state and set cwnd to 1 MTU, see SCTP
797 		 * Quick failover draft section 5.1, point 5
798 		 */
799 		if (transport->state == SCTP_PF) {
800 			ulp_notify = false;
801 			transport->cwnd = asoc->pathmtu;
802 		}
803 		transport->state = SCTP_ACTIVE;
804 		break;
805 
806 	case SCTP_TRANSPORT_DOWN:
807 		/* If the transport was never confirmed, do not transition it
808 		 * to inactive state.  Also, release the cached route since
809 		 * there may be a better route next time.
810 		 */
811 		if (transport->state != SCTP_UNCONFIRMED)
812 			transport->state = SCTP_INACTIVE;
813 		else {
814 			dst_release(transport->dst);
815 			transport->dst = NULL;
816 		}
817 
818 		spc_state = SCTP_ADDR_UNREACHABLE;
819 		break;
820 
821 	case SCTP_TRANSPORT_PF:
822 		transport->state = SCTP_PF;
823 		ulp_notify = false;
824 		break;
825 
826 	default:
827 		return;
828 	}
829 
830 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
831 	 * to the user.
832 	 */
833 	if (ulp_notify) {
834 		memset(&addr, 0, sizeof(struct sockaddr_storage));
835 		memcpy(&addr, &transport->ipaddr,
836 		       transport->af_specific->sockaddr_len);
837 
838 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
839 					0, spc_state, error, GFP_ATOMIC);
840 		if (event)
841 			sctp_ulpq_tail_event(&asoc->ulpq, event);
842 	}
843 
844 	/* Select new active and retran paths. */
845 	sctp_select_active_and_retran_path(asoc);
846 }
847 
848 /* Hold a reference to an association. */
849 void sctp_association_hold(struct sctp_association *asoc)
850 {
851 	atomic_inc(&asoc->base.refcnt);
852 }
853 
854 /* Release a reference to an association and cleanup
855  * if there are no more references.
856  */
857 void sctp_association_put(struct sctp_association *asoc)
858 {
859 	if (atomic_dec_and_test(&asoc->base.refcnt))
860 		sctp_association_destroy(asoc);
861 }
862 
863 /* Allocate the next TSN, Transmission Sequence Number, for the given
864  * association.
865  */
866 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
867 {
868 	/* From Section 1.6 Serial Number Arithmetic:
869 	 * Transmission Sequence Numbers wrap around when they reach
870 	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
871 	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
872 	 */
873 	__u32 retval = asoc->next_tsn;
874 	asoc->next_tsn++;
875 	asoc->unack_data++;
876 
877 	return retval;
878 }
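/* Serial number arithmetic in practice: if asoc->next_tsn is 0xffffffff, the
 * chunk is assigned TSN 0xffffffff and the __u32 increment wraps next_tsn
 * around to 0, matching the rule quoted above.
 */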
879 
880 /* Compare two addresses to see if they match.  Wildcard addresses
881  * only match themselves.
882  */
883 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
884 			const union sctp_addr *ss2)
885 {
886 	struct sctp_af *af;
887 
888 	af = sctp_get_af_specific(ss1->sa.sa_family);
889 	if (unlikely(!af))
890 		return 0;
891 
892 	return af->cmp_addr(ss1, ss2);
893 }
894 
895 /* Return an ecne chunk to get prepended to a packet.
896  * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
897  * No we don't, but we could/should.
898  */
899 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
900 {
901 	if (!asoc->need_ecne)
902 		return NULL;
903 
904 	/* Send ECNE if needed.
905 	 * Not being able to allocate a chunk here is not deadly.
906 	 */
907 	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
908 }
909 
910 /*
911  * Find which transport this TSN was sent on.
912  */
913 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
914 					     __u32 tsn)
915 {
916 	struct sctp_transport *active;
917 	struct sctp_transport *match;
918 	struct sctp_transport *transport;
919 	struct sctp_chunk *chunk;
920 	__be32 key = htonl(tsn);
921 
922 	match = NULL;
923 
924 	/*
925 	 * FIXME: In general, find a more efficient data structure for
926 	 * searching.
927 	 */
928 
929 	/*
930 	 * The general strategy is to search each transport's transmitted
931 	 * list.   Return which transport this TSN lives on.
932 	 *
933 	 * Let's be hopeful and check the active_path first.
934 	 * Another optimization would be to know if there is only one
935 	 * outbound path and not have to look for the TSN at all.
936 	 *
937 	 */
938 
939 	active = asoc->peer.active_path;
940 
941 	list_for_each_entry(chunk, &active->transmitted,
942 			transmitted_list) {
943 
944 		if (key == chunk->subh.data_hdr->tsn) {
945 			match = active;
946 			goto out;
947 		}
948 	}
949 
950 	/* If not found, go search all the other transports. */
951 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
952 			transports) {
953 
954 		if (transport == active)
955 			continue;
956 		list_for_each_entry(chunk, &transport->transmitted,
957 				transmitted_list) {
958 			if (key == chunk->subh.data_hdr->tsn) {
959 				match = transport;
960 				goto out;
961 			}
962 		}
963 	}
964 out:
965 	return match;
966 }
967 
968 /* Is this the association we are looking for? */
969 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
970 					   struct net *net,
971 					   const union sctp_addr *laddr,
972 					   const union sctp_addr *paddr)
973 {
974 	struct sctp_transport *transport;
975 
976 	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
977 	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
978 	    net_eq(sock_net(asoc->base.sk), net)) {
979 		transport = sctp_assoc_lookup_paddr(asoc, paddr);
980 		if (!transport)
981 			goto out;
982 
983 		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
984 					 sctp_sk(asoc->base.sk)))
985 			goto out;
986 	}
987 	transport = NULL;
988 
989 out:
990 	return transport;
991 }
992 
993 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
994 static void sctp_assoc_bh_rcv(struct work_struct *work)
995 {
996 	struct sctp_association *asoc =
997 		container_of(work, struct sctp_association,
998 			     base.inqueue.immediate);
999 	struct net *net = sock_net(asoc->base.sk);
1000 	struct sctp_endpoint *ep;
1001 	struct sctp_chunk *chunk;
1002 	struct sctp_inq *inqueue;
1003 	int state;
1004 	sctp_subtype_t subtype;
1005 	int error = 0;
1006 
1007 	/* The association should be held so we should be safe. */
1008 	ep = asoc->ep;
1009 
1010 	inqueue = &asoc->base.inqueue;
1011 	sctp_association_hold(asoc);
1012 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1013 		state = asoc->state;
1014 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1015 
1016 		/* SCTP-AUTH, Section 6.3:
1017 		 *    The receiver has a list of chunk types which it expects
1018 		 *    to be received only after an AUTH-chunk.  This list has
1019 		 *    been sent to the peer during the association setup.  It
1020 		 *    MUST silently discard these chunks if they are not placed
1021 		 *    after an AUTH chunk in the packet.
1022 		 */
1023 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1024 			continue;
1025 
1026 		/* Remember where the last DATA chunk came from so we
1027 		 * know where to send the SACK.
1028 		 */
1029 		if (sctp_chunk_is_data(chunk))
1030 			asoc->peer.last_data_from = chunk->transport;
1031 		else {
1032 			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1033 			asoc->stats.ictrlchunks++;
1034 			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1035 				asoc->stats.isacks++;
1036 		}
1037 
1038 		if (chunk->transport)
1039 			chunk->transport->last_time_heard = ktime_get();
1040 
1041 		/* Run through the state machine. */
1042 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1043 				   state, ep, asoc, chunk, GFP_ATOMIC);
1044 
1045 		/* Check to see if the association is freed in response to
1046 		 * the incoming chunk.  If so, get out of the while loop.
1047 		 */
1048 		if (asoc->base.dead)
1049 			break;
1050 
1051 		/* If there is an error on chunk, discard this packet. */
1052 		if (error && chunk)
1053 			chunk->pdiscard = 1;
1054 	}
1055 	sctp_association_put(asoc);
1056 }
1057 
1058 /* This routine moves an association from its old sk to a new sk.  */
1059 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1060 {
1061 	struct sctp_sock *newsp = sctp_sk(newsk);
1062 	struct sock *oldsk = assoc->base.sk;
1063 
1064 	/* Delete the association from the old endpoint's list of
1065 	 * associations.
1066 	 */
1067 	list_del_init(&assoc->asocs);
1068 
1069 	/* Decrement the backlog value for a TCP-style socket. */
1070 	if (sctp_style(oldsk, TCP))
1071 		oldsk->sk_ack_backlog--;
1072 
1073 	/* Release references to the old endpoint and the sock.  */
1074 	sctp_endpoint_put(assoc->ep);
1075 	sock_put(assoc->base.sk);
1076 
1077 	/* Get a reference to the new endpoint.  */
1078 	assoc->ep = newsp->ep;
1079 	sctp_endpoint_hold(assoc->ep);
1080 
1081 	/* Get a reference to the new sock.  */
1082 	assoc->base.sk = newsk;
1083 	sock_hold(assoc->base.sk);
1084 
1085 	/* Add the association to the new endpoint's list of associations.  */
1086 	sctp_endpoint_add_asoc(newsp->ep, assoc);
1087 }
1088 
1089 /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1090 void sctp_assoc_update(struct sctp_association *asoc,
1091 		       struct sctp_association *new)
1092 {
1093 	struct sctp_transport *trans;
1094 	struct list_head *pos, *temp;
1095 
1096 	/* Copy in new parameters of peer. */
1097 	asoc->c = new->c;
1098 	asoc->peer.rwnd = new->peer.rwnd;
1099 	asoc->peer.sack_needed = new->peer.sack_needed;
1100 	asoc->peer.i = new->peer.i;
1101 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1102 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1103 
1104 	/* Remove any peer addresses not present in the new association. */
1105 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1106 		trans = list_entry(pos, struct sctp_transport, transports);
1107 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1108 			sctp_assoc_rm_peer(asoc, trans);
1109 			continue;
1110 		}
1111 
1112 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1113 			sctp_transport_reset(trans);
1114 	}
1115 
1116 	/* If the case is A (association restart), use
1117 	 * initial_tsn as next_tsn. If the case is B, use
1118 	 * current next_tsn in case data sent to peer
1119 	 * has been discarded and needs retransmission.
1120 	 */
1121 	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1122 		asoc->next_tsn = new->next_tsn;
1123 		asoc->ctsn_ack_point = new->ctsn_ack_point;
1124 		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1125 
1126 		/* Reinitialize SSN for both local streams
1127 		 * and peer's streams.
1128 		 */
1129 		sctp_ssnmap_clear(asoc->ssnmap);
1130 
1131 		/* Flush the ULP reassembly and ordered queue.
1132 		 * Any data there will now be stale and will
1133 		 * cause problems.
1134 		 */
1135 		sctp_ulpq_flush(&asoc->ulpq);
1136 
1137 		/* reset the overall association error count so
1138 		 * that the restarted association doesn't get torn
1139 		 * down on the next retransmission timer.
1140 		 */
1141 		asoc->overall_error_count = 0;
1142 
1143 	} else {
1144 		/* Add any peer addresses from the new association. */
1145 		list_for_each_entry(trans, &new->peer.transport_addr_list,
1146 				transports) {
1147 			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1148 				sctp_assoc_add_peer(asoc, &trans->ipaddr,
1149 						    GFP_ATOMIC, trans->state);
1150 		}
1151 
1152 		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1153 		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1154 		if (!asoc->ssnmap) {
1155 			/* Move the ssnmap. */
1156 			asoc->ssnmap = new->ssnmap;
1157 			new->ssnmap = NULL;
1158 		}
1159 
1160 		if (!asoc->assoc_id) {
1161 			/* get a new association id since we don't have one
1162 			 * yet.
1163 			 */
1164 			sctp_assoc_set_id(asoc, GFP_ATOMIC);
1165 		}
1166 	}
1167 
1168 	/* SCTP-AUTH: Save the peer parameters from the new association
1169 	 * and also move the association shared keys over
1170 	 */
1171 	kfree(asoc->peer.peer_random);
1172 	asoc->peer.peer_random = new->peer.peer_random;
1173 	new->peer.peer_random = NULL;
1174 
1175 	kfree(asoc->peer.peer_chunks);
1176 	asoc->peer.peer_chunks = new->peer.peer_chunks;
1177 	new->peer.peer_chunks = NULL;
1178 
1179 	kfree(asoc->peer.peer_hmacs);
1180 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1181 	new->peer.peer_hmacs = NULL;
1182 
1183 	sctp_auth_key_put(asoc->asoc_shared_key);
1184 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1185 }
1186 
1187 /* Update the retran path for sending a retransmitted packet.
1188  * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1189  *
1190  *   When there is outbound data to send and the primary path
1191  *   becomes inactive (e.g., due to failures), or where the
1192  *   SCTP user explicitly requests to send data to an
1193  *   inactive destination transport address, before reporting
1194  *   an error to its ULP, the SCTP endpoint should try to send
1195  *   the data to an alternate active destination transport
1196  *   address if one exists.
1197  *
1198  *   When retransmitting data that timed out, if the endpoint
1199  *   is multihomed, it should consider each source-destination
1200  *   address pair in its retransmission selection policy.
1201  *   When retransmitting timed-out data, the endpoint should
1202  *   attempt to pick the most divergent source-destination
1203  *   pair from the original source-destination pair to which
1204  *   the packet was transmitted.
1205  *
1206  *   Note: Rules for picking the most divergent source-destination
1207  *   pair are an implementation decision and are not specified
1208  *   within this document.
1209  *
1210  * Our basic strategy is to round-robin transports in priorities
1211  * according to sctp_trans_state_to_prio_map[], e.g., if no such
1212  * transport with state SCTP_ACTIVE exists, round-robin through
1213  * SCTP_UNKNOWN, etc. You get the picture.
1214  */
1215 static const u8 sctp_trans_state_to_prio_map[] = {
1216 	[SCTP_ACTIVE]	= 3,	/* best case */
1217 	[SCTP_UNKNOWN]	= 2,
1218 	[SCTP_PF]	= 1,
1219 	[SCTP_INACTIVE] = 0,	/* worst case */
1220 };
1221 
1222 static u8 sctp_trans_score(const struct sctp_transport *trans)
1223 {
1224 	return sctp_trans_state_to_prio_map[trans->state];
1225 }
1226 
1227 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1228 						   struct sctp_transport *trans2)
1229 {
1230 	if (trans1->error_count > trans2->error_count) {
1231 		return trans2;
1232 	} else if (trans1->error_count == trans2->error_count &&
1233 		   ktime_after(trans2->last_time_heard,
1234 			       trans1->last_time_heard)) {
1235 		return trans2;
1236 	} else {
1237 		return trans1;
1238 	}
1239 }
1240 
1241 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1242 						    struct sctp_transport *best)
1243 {
1244 	u8 score_curr, score_best;
1245 
1246 	if (best == NULL)
1247 		return curr;
1248 
1249 	score_curr = sctp_trans_score(curr);
1250 	score_best = sctp_trans_score(best);
1251 
1252 	/* First, try a score-based selection if both transport states
1253 	 * differ. If we're in a tie, let's try to make a more clever
1254 	 * decision here based on error counts and last time heard.
1255 	 */
1256 	if (score_curr > score_best)
1257 		return curr;
1258 	else if (score_curr == score_best)
1259 		return sctp_trans_elect_tie(curr, best);
1260 	else
1261 		return best;
1262 }
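/* Tie-break example: two SCTP_ACTIVE transports both score 3, so the one
 * with the lower error_count wins; with equal error counts, the transport
 * heard from more recently (later last_time_heard) is preferred.
 */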
1263 
1264 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1265 {
1266 	struct sctp_transport *trans = asoc->peer.retran_path;
1267 	struct sctp_transport *trans_next = NULL;
1268 
1269 	/* We're done as we only have the one and only path. */
1270 	if (asoc->peer.transport_count == 1)
1271 		return;
1272 	/* If active_path and retran_path are the same and active,
1273 	 * then this is the only active path. Use it.
1274 	 */
1275 	if (asoc->peer.active_path == asoc->peer.retran_path &&
1276 	    asoc->peer.active_path->state == SCTP_ACTIVE)
1277 		return;
1278 
1279 	/* Iterate from retran_path's successor back to retran_path. */
1280 	for (trans = list_next_entry(trans, transports); 1;
1281 	     trans = list_next_entry(trans, transports)) {
1282 		/* Manually skip the head element. */
1283 		if (&trans->transports == &asoc->peer.transport_addr_list)
1284 			continue;
1285 		if (trans->state == SCTP_UNCONFIRMED)
1286 			continue;
1287 		trans_next = sctp_trans_elect_best(trans, trans_next);
1288 		/* Active is good enough for immediate return. */
1289 		if (trans_next->state == SCTP_ACTIVE)
1290 			break;
1291 		/* We've reached the end, time to update path. */
1292 		if (trans == asoc->peer.retran_path)
1293 			break;
1294 	}
1295 
1296 	asoc->peer.retran_path = trans_next;
1297 
1298 	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1299 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1300 }
1301 
1302 static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1303 {
1304 	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1305 	struct sctp_transport *trans_pf = NULL;
1306 
1307 	/* Look for the two most recently used active transports. */
1308 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1309 			    transports) {
1310 		/* Skip uninteresting transports. */
1311 		if (trans->state == SCTP_INACTIVE ||
1312 		    trans->state == SCTP_UNCONFIRMED)
1313 			continue;
1314 		/* Keep track of the best PF transport from our
1315 		 * list in case we don't find an active one.
1316 		 */
1317 		if (trans->state == SCTP_PF) {
1318 			trans_pf = sctp_trans_elect_best(trans, trans_pf);
1319 			continue;
1320 		}
1321 		/* For active transports, pick the most recent ones. */
1322 		if (trans_pri == NULL ||
1323 		    ktime_after(trans->last_time_heard,
1324 				trans_pri->last_time_heard)) {
1325 			trans_sec = trans_pri;
1326 			trans_pri = trans;
1327 		} else if (trans_sec == NULL ||
1328 			   ktime_after(trans->last_time_heard,
1329 				       trans_sec->last_time_heard)) {
1330 			trans_sec = trans;
1331 		}
1332 	}
1333 
1334 	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1335 	 *
1336 	 * By default, an endpoint should always transmit to the primary
1337 	 * path, unless the SCTP user explicitly specifies the
1338 	 * destination transport address (and possibly source transport
1339 	 * address) to use. [If the primary is active but not most recent,
1340 	 * bump the most recently used transport.]
1341 	 */
1342 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1343 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1344 	     asoc->peer.primary_path != trans_pri) {
1345 		trans_sec = trans_pri;
1346 		trans_pri = asoc->peer.primary_path;
1347 	}
1348 
1349 	/* We did not find anything useful for a possible retransmission
1350 	 * path; either the primary path that we found is the same as
1351 	 * the current one, or we didn't generally find an active one.
1352 	 */
1353 	if (trans_sec == NULL)
1354 		trans_sec = trans_pri;
1355 
1356 	/* If we failed to find a usable transport, just camp on the
1357 	 * primary or retran path, even if they are inactive; if possible,
1358 	 * pick a PF transport iff it's the better choice.
1359 	 */
1360 	if (trans_pri == NULL) {
1361 		trans_pri = sctp_trans_elect_best(asoc->peer.primary_path,
1362 						  asoc->peer.retran_path);
1363 		trans_pri = sctp_trans_elect_best(trans_pri, trans_pf);
1364 		trans_sec = asoc->peer.primary_path;
1365 	}
1366 
1367 	/* Set the active and retran transports. */
1368 	asoc->peer.active_path = trans_pri;
1369 	asoc->peer.retran_path = trans_sec;
1370 }
1371 
1372 struct sctp_transport *
1373 sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1374 				  struct sctp_transport *last_sent_to)
1375 {
1376 	/* If this is the first time packet is sent, use the active path,
1377 	 * else use the retran path. If the last packet was sent over the
1378 	 * retran path, update the retran path and use it.
1379 	 */
1380 	if (last_sent_to == NULL) {
1381 		return asoc->peer.active_path;
1382 	} else {
1383 		if (last_sent_to == asoc->peer.retran_path)
1384 			sctp_assoc_update_retran_path(asoc);
1385 
1386 		return asoc->peer.retran_path;
1387 	}
1388 }
1389 
1390 /* Update the association's pmtu and frag_point by going through all the
1391  * transports. This routine is called when a transport's PMTU has changed.
1392  */
1393 void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1394 {
1395 	struct sctp_transport *t;
1396 	__u32 pmtu = 0;
1397 
1398 	if (!asoc)
1399 		return;
1400 
1401 	/* Get the lowest pmtu of all the transports. */
1402 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
1403 				transports) {
1404 		if (t->pmtu_pending && t->dst) {
1405 			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1406 			t->pmtu_pending = 0;
1407 		}
1408 		if (!pmtu || (t->pathmtu < pmtu))
1409 			pmtu = t->pathmtu;
1410 	}
1411 
1412 	if (pmtu) {
1413 		asoc->pathmtu = pmtu;
1414 		asoc->frag_point = sctp_frag_point(asoc, pmtu);
1415 	}
1416 
1417 	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1418 		 asoc->pathmtu, asoc->frag_point);
1419 }
1420 
1421 /* Should we send a SACK to update our peer? */
1422 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1423 {
1424 	struct net *net = sock_net(asoc->base.sk);
1425 	switch (asoc->state) {
1426 	case SCTP_STATE_ESTABLISHED:
1427 	case SCTP_STATE_SHUTDOWN_PENDING:
1428 	case SCTP_STATE_SHUTDOWN_RECEIVED:
1429 	case SCTP_STATE_SHUTDOWN_SENT:
1430 		if ((asoc->rwnd > asoc->a_rwnd) &&
1431 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1432 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1433 			   asoc->pathmtu)))
1434 			return true;
1435 		break;
1436 	default:
1437 		break;
1438 	}
1439 	return false;
1440 }
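/* Worked example, assuming the default rwnd_upd_shift of 4: with a 256 KiB
 * sk_rcvbuf and a 1500-byte PMTU the threshold is max(16 KiB, 1500) = 16 KiB,
 * so an update SACK is only warranted once rwnd exceeds the last advertised
 * a_rwnd by at least 16 KiB.
 */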
1441 
1442 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1443 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1444 {
1445 	struct sctp_chunk *sack;
1446 	struct timer_list *timer;
1447 
1448 	if (asoc->rwnd_over) {
1449 		if (asoc->rwnd_over >= len) {
1450 			asoc->rwnd_over -= len;
1451 		} else {
1452 			asoc->rwnd += (len - asoc->rwnd_over);
1453 			asoc->rwnd_over = 0;
1454 		}
1455 	} else {
1456 		asoc->rwnd += len;
1457 	}
1458 
1459 	/* If we had window pressure, start recovering it
1460 	 * once our rwnd had reached the accumulated pressure
1461 	 * threshold.  The idea is to recover slowly, but up
1462 	 * to the initial advertised window.
1463 	 */
1464 	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1465 		int change = min(asoc->pathmtu, asoc->rwnd_press);
1466 		asoc->rwnd += change;
1467 		asoc->rwnd_press -= change;
1468 	}
1469 
1470 	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1471 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1472 		 asoc->a_rwnd);
1473 
1474 	/* Send a window update SACK if the rwnd has increased by at least the
1475 	 * larger of the association's PMTU and sk_rcvbuf >> rwnd_upd_shift.
1476 	 * The algorithm used is similar to the one described in
1477 	 * Section 4.2.3.3 of RFC 1122.
1478 	 */
1479 	if (sctp_peer_needs_update(asoc)) {
1480 		asoc->a_rwnd = asoc->rwnd;
1481 
1482 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1483 			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1484 			 asoc->a_rwnd);
1485 
1486 		sack = sctp_make_sack(asoc);
1487 		if (!sack)
1488 			return;
1489 
1490 		asoc->peer.sack_needed = 0;
1491 
1492 		sctp_outq_tail(&asoc->outqueue, sack);
1493 
1494 		/* Stop the SACK timer.  */
1495 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1496 		if (del_timer(timer))
1497 			sctp_association_put(asoc);
1498 	}
1499 }
1500 
1501 /* Decrease asoc's rwnd by len. */
1502 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1503 {
1504 	int rx_count;
1505 	int over = 0;
1506 
1507 	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1508 		pr_debug("%s: association:%p has asoc->rwnd:%u, "
1509 			 "asoc->rwnd_over:%u!\n", __func__, asoc,
1510 			 asoc->rwnd, asoc->rwnd_over);
1511 
1512 	if (asoc->ep->rcvbuf_policy)
1513 		rx_count = atomic_read(&asoc->rmem_alloc);
1514 	else
1515 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1516 
1517 	/* If we've reached or overflowed our receive buffer, announce
1518 	 * a 0 rwnd even if rwnd would still be positive.  Store the
1519 	 * potential pressure overflow so that the window can be restored
1520 	 * back to its original value.
1521 	 */
1522 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1523 		over = 1;
1524 
1525 	if (asoc->rwnd >= len) {
1526 		asoc->rwnd -= len;
1527 		if (over) {
1528 			asoc->rwnd_press += asoc->rwnd;
1529 			asoc->rwnd = 0;
1530 		}
1531 	} else {
1532 		asoc->rwnd_over = len - asoc->rwnd;
1533 		asoc->rwnd = 0;
1534 	}
1535 
1536 	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1537 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1538 		 asoc->rwnd_press);
1539 }
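/* Example of the rwnd/rwnd_over bookkeeping across the two helpers above:
 * with rwnd = 1000, a 3000-byte chunk arriving leaves rwnd = 0 and
 * rwnd_over = 2000; once the user later reads 2500 bytes, rwnd_over drains
 * to 0 and rwnd recovers to 500.
 */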
1540 
1541 /* Build the bind address list for the association based on info from the
1542  * local endpoint and the remote peer.
1543  */
1544 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1545 				     sctp_scope_t scope, gfp_t gfp)
1546 {
1547 	int flags;
1548 
1549 	/* Use scoping rules to determine the subset of addresses from
1550 	 * the endpoint.
1551 	 */
1552 	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1553 	if (asoc->peer.ipv4_address)
1554 		flags |= SCTP_ADDR4_PEERSUPP;
1555 	if (asoc->peer.ipv6_address)
1556 		flags |= SCTP_ADDR6_PEERSUPP;
1557 
1558 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1559 				   &asoc->base.bind_addr,
1560 				   &asoc->ep->base.bind_addr,
1561 				   scope, gfp, flags);
1562 }
1563 
1564 /* Build the association's bind address list from the cookie.  */
1565 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1566 					 struct sctp_cookie *cookie,
1567 					 gfp_t gfp)
1568 {
1569 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1570 	int var_size3 = cookie->raw_addr_list_len;
1571 	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1572 
1573 	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1574 				      asoc->ep->base.bind_addr.port, gfp);
1575 }
1576 
1577 /* Lookup laddr in the bind address list of an association. */
1578 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1579 			    const union sctp_addr *laddr)
1580 {
1581 	int found = 0;
1582 
1583 	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1584 	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1585 				 sctp_sk(asoc->base.sk)))
1586 		found = 1;
1587 
1588 	return found;
1589 }
1590 
1591 /* Set an association id for a given association */
1592 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1593 {
1594 	bool preload = !!(gfp & __GFP_WAIT);
1595 	int ret;
1596 
1597 	/* If the id is already assigned, keep it. */
1598 	if (asoc->assoc_id)
1599 		return 0;
1600 
1601 	if (preload)
1602 		idr_preload(gfp);
1603 	spin_lock_bh(&sctp_assocs_id_lock);
1604 	/* 0 is not a valid assoc_id, must be >= 1 */
1605 	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1606 	spin_unlock_bh(&sctp_assocs_id_lock);
1607 	if (preload)
1608 		idr_preload_end();
1609 	if (ret < 0)
1610 		return ret;
1611 
1612 	asoc->assoc_id = (sctp_assoc_t)ret;
1613 	return 0;
1614 }
1615 
1616 /* Free the ASCONF queue */
1617 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1618 {
1619 	struct sctp_chunk *asconf;
1620 	struct sctp_chunk *tmp;
1621 
1622 	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1623 		list_del_init(&asconf->list);
1624 		sctp_chunk_free(asconf);
1625 	}
1626 }
1627 
1628 /* Free asconf_ack cache */
1629 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1630 {
1631 	struct sctp_chunk *ack;
1632 	struct sctp_chunk *tmp;
1633 
1634 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1635 				transmitted_list) {
1636 		list_del_init(&ack->transmitted_list);
1637 		sctp_chunk_free(ack);
1638 	}
1639 }
1640 
1641 /* Clean up the ASCONF_ACK queue */
1642 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1643 {
1644 	struct sctp_chunk *ack;
1645 	struct sctp_chunk *tmp;
1646 
1647 	/* We can remove all the entries from the queue up to
1648 	 * the "Peer-Sequence-Number".
1649 	 */
1650 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1651 				transmitted_list) {
1652 		if (ack->subh.addip_hdr->serial ==
1653 				htonl(asoc->peer.addip_serial))
1654 			break;
1655 
1656 		list_del_init(&ack->transmitted_list);
1657 		sctp_chunk_free(ack);
1658 	}
1659 }
1660 
1661 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1662 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1663 					const struct sctp_association *asoc,
1664 					__be32 serial)
1665 {
1666 	struct sctp_chunk *ack;
1667 
1668 	/* Walk through the list of cached ASCONF-ACKs and find the
1669 	 * ack chunk whose serial number matches that of the request.
1670 	 */
1671 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1672 		if (ack->subh.addip_hdr->serial == serial) {
1673 			sctp_chunk_hold(ack);
1674 			return ack;
1675 		}
1676 	}
1677 
1678 	return NULL;
1679 }
1680 
1681 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1682 {
1683 	/* Free any cached ASCONF_ACK chunk. */
1684 	sctp_assoc_free_asconf_acks(asoc);
1685 
1686 	/* Free the ASCONF queue. */
1687 	sctp_assoc_free_asconf_queue(asoc);
1688 
1689 	/* Free any cached ASCONF chunk. */
1690 	if (asoc->addip_last_asconf)
1691 		sctp_chunk_free(asoc->addip_last_asconf);
1692 }
1693