xref: /openbmc/linux/net/sctp/associola.c (revision 232b0b08)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 La Monte H.P. Yarroll
7  *
8  * This file is part of the SCTP kernel implementation
9  *
10  * This module provides the abstraction for an SCTP association.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Jon Grimm             <jgrimm@us.ibm.com>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang             <hui.huang@nokia.com>
38  *    Sridhar Samudrala	    <sri@us.ibm.com>
39  *    Daisy Chang	    <daisyc@us.ibm.com>
40  *    Ryan Layer	    <rmlayer@us.ibm.com>
41  *    Kevin Gao             <kevin.gao@intel.com>
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/types.h>
47 #include <linux/fcntl.h>
48 #include <linux/poll.h>
49 #include <linux/init.h>
50 
51 #include <linux/slab.h>
52 #include <linux/in.h>
53 #include <net/ipv6.h>
54 #include <net/sctp/sctp.h>
55 #include <net/sctp/sm.h>
56 
57 /* Forward declarations for internal functions. */
58 static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59 static void sctp_assoc_bh_rcv(struct work_struct *work);
60 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62 
63 /* 1st Level Abstractions. */
64 
65 /* Initialize a new association from provided memory. */
66 static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
67 					  const struct sctp_endpoint *ep,
68 					  const struct sock *sk,
69 					  sctp_scope_t scope,
70 					  gfp_t gfp)
71 {
72 	struct net *net = sock_net(sk);
73 	struct sctp_sock *sp;
74 	sctp_paramhdr_t *p;
75 	int i;
76 
77 	/* Retrieve the SCTP per socket area.  */
78 	sp = sctp_sk((struct sock *)sk);
79 
80 	/* Discarding const is appropriate here.  */
81 	asoc->ep = (struct sctp_endpoint *)ep;
82 	asoc->base.sk = (struct sock *)sk;
83 
84 	sctp_endpoint_hold(asoc->ep);
85 	sock_hold(asoc->base.sk);
86 
87 	/* Initialize the common base substructure.  */
88 	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
89 
90 	/* Initialize the object handling fields.  */
91 	atomic_set(&asoc->base.refcnt, 1);
92 
93 	/* Initialize the bind addr area.  */
94 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
95 
96 	asoc->state = SCTP_STATE_CLOSED;
97 	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
98 	asoc->user_frag = sp->user_frag;
99 
100 	/* Set the association max_retrans and RTO values from the
101 	 * socket values.
102 	 */
103 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
104 	asoc->pf_retrans  = net->sctp.pf_retrans;
105 
106 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
107 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
108 	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
109 
110 	/* Initialize the association's heartbeat interval based on the
111 	 * sock configured value.
112 	 */
113 	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
114 
115 	/* Initialize path max retrans value. */
116 	asoc->pathmaxrxt = sp->pathmaxrxt;
117 
118 	/* Initialize default path MTU. */
119 	asoc->pathmtu = sp->pathmtu;
120 
121 	/* Set association default SACK delay */
122 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
123 	asoc->sackfreq = sp->sackfreq;
124 
125 	/* Set the association default flags controlling
126 	 * Heartbeat, SACK delay, and Path MTU Discovery.
127 	 */
128 	asoc->param_flags = sp->param_flags;
129 
130 	/* Initialize the maximum number of new data packets that can be sent
131 	 * in a burst.
132 	 */
133 	asoc->max_burst = sp->max_burst;
134 
135 	/* initialize association timers */
136 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
137 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
138 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
139 
140 	/* sctpimpguide Section 2.12.2
141 	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
142 	 * recommended value of 5 times 'RTO.Max'.
143 	 */
144 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
145 		= 5 * asoc->rto_max;
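	/* Illustrative arithmetic (the 60 s figure is the RFC 4960 suggested
	 * default for RTO.Max, not something set in this file): with
	 * RTO.Max = 60 seconds, the T5-shutdown-guard timer above is armed
	 * for 5 * 60 = 300 seconds.
	 */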
146 
147 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
148 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
149 
150 	/* Initializes the timers */
151 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
152 		setup_timer(&asoc->timers[i], sctp_timer_events[i],
153 				(unsigned long)asoc);
154 
155 	/* Pull default initialization values from the sock options.
156 	 * Note: This assumes that the values have already been
157 	 * validated in the sock.
158 	 */
159 	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
160 	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
161 	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
162 
163 	asoc->max_init_timeo =
164 		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
165 
166 	/* Set the local window size for receive.
167 	 * This is also the rcvbuf space per association.
168 	 * RFC 2960 - An SCTP receiver MUST be able to receive a minimum of
169 	 * 1500 bytes in one SCTP packet.
170 	 */
171 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
172 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
173 	else
174 		asoc->rwnd = sk->sk_rcvbuf/2;
175 
176 	asoc->a_rwnd = asoc->rwnd;
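	/* Worked example (the receive buffer size is an assumed typical value,
	 * not one set by this file): with sk_rcvbuf = 212992 bytes, both rwnd
	 * and a_rwnd start at 212992 / 2 = 106496 bytes, well above
	 * SCTP_DEFAULT_MINWINDOW, so the clamp above does not kick in.
	 */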
177 
178 	/* Use my own max window until I learn something better.  */
179 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
180 
181 	/* Initialize the receive memory counter */
182 	atomic_set(&asoc->rmem_alloc, 0);
183 
184 	init_waitqueue_head(&asoc->wait);
185 
186 	asoc->c.my_vtag = sctp_generate_tag(ep);
187 	asoc->c.my_port = ep->base.bind_addr.port;
188 
189 	asoc->c.initial_tsn = sctp_generate_tsn(ep);
190 
191 	asoc->next_tsn = asoc->c.initial_tsn;
192 
193 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
194 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
195 	asoc->highest_sacked = asoc->ctsn_ack_point;
196 	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
197 
198 	/* ADDIP Section 4.1 Asconf Chunk Procedures
199 	 *
200 	 * When an endpoint has an ASCONF signaled change to be sent to the
201 	 * remote endpoint it should do the following:
202 	 * ...
203 	 * A2) a serial number should be assigned to the chunk. The serial
204 	 * number SHOULD be a monotonically increasing number. The serial
205 	 * numbers SHOULD be initialized at the start of the
206 	 * association to the same value as the initial TSN.
207 	 */
208 	asoc->addip_serial = asoc->c.initial_tsn;
209 	asoc->strreset_outseq = asoc->c.initial_tsn;
210 
211 	INIT_LIST_HEAD(&asoc->addip_chunk_list);
212 	INIT_LIST_HEAD(&asoc->asconf_ack_list);
213 
214 	/* Make an empty list of remote transport addresses.  */
215 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
216 
217 	/* RFC 2960 5.1 Normal Establishment of an Association
218 	 *
219 	 * After the reception of the first data chunk in an
220 	 * association the endpoint must immediately respond with a
221 	 * sack to acknowledge the data chunk.  Subsequent
222 	 * acknowledgements should be done as described in Section
223 	 * 6.2.
224 	 *
225 	 * [We implement this by telling a new association that it
226 	 * already received one packet.]
227 	 */
228 	asoc->peer.sack_needed = 1;
229 	asoc->peer.sack_generation = 1;
230 
231 	/* Assume that the peer will tell us whether it recognizes ASCONF
232 	 * as part of the INIT exchange.
233 	 * The sctp_addip_noauth option is there for backward compatibility
234 	 * and restores the old behavior.
235 	 */
236 	if (net->sctp.addip_noauth)
237 		asoc->peer.asconf_capable = 1;
238 
239 	/* Create an input queue.  */
240 	sctp_inq_init(&asoc->base.inqueue);
241 	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
242 
243 	/* Create an output queue.  */
244 	sctp_outq_init(asoc, &asoc->outqueue);
245 
246 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 		goto fail_init;
248 
249 	/* Assume that the peer supports both address types unless we are
250 	 * told otherwise.
251 	 */
252 	asoc->peer.ipv4_address = 1;
253 	if (asoc->base.sk->sk_family == PF_INET6)
254 		asoc->peer.ipv6_address = 1;
255 	INIT_LIST_HEAD(&asoc->asocs);
256 
257 	asoc->default_stream = sp->default_stream;
258 	asoc->default_ppid = sp->default_ppid;
259 	asoc->default_flags = sp->default_flags;
260 	asoc->default_context = sp->default_context;
261 	asoc->default_timetolive = sp->default_timetolive;
262 	asoc->default_rcv_context = sp->default_rcv_context;
263 
264 	/* AUTH related initializations */
265 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
266 	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
267 		goto fail_init;
268 
269 	asoc->active_key_id = ep->active_key_id;
270 	asoc->prsctp_enable = ep->prsctp_enable;
271 	asoc->reconf_enable = ep->reconf_enable;
272 	asoc->strreset_enable = ep->strreset_enable;
273 
274 	/* Save the hmacs and chunks list into this association */
275 	if (ep->auth_hmacs_list)
276 		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
277 			ntohs(ep->auth_hmacs_list->param_hdr.length));
278 	if (ep->auth_chunk_list)
279 		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
280 			ntohs(ep->auth_chunk_list->param_hdr.length));
281 
282 	/* Get the AUTH random number for this association */
283 	p = (sctp_paramhdr_t *)asoc->c.auth_random;
284 	p->type = SCTP_PARAM_RANDOM;
285 	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
286 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
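	/* The three statements above lay out a RANDOM parameter TLV in place:
	 * a parameter header (type SCTP_PARAM_RANDOM, length covering the
	 * header plus the random bytes) immediately followed by
	 * SCTP_AUTH_RANDOM_LENGTH bytes of random data, as used by SCTP-AUTH
	 * (RFC 4895).
	 */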
287 
288 	return asoc;
289 
290 fail_init:
291 	sock_put(asoc->base.sk);
292 	sctp_endpoint_put(asoc->ep);
293 	return NULL;
294 }
295 
296 /* Allocate and initialize a new association */
297 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
298 					 const struct sock *sk,
299 					 sctp_scope_t scope,
300 					 gfp_t gfp)
301 {
302 	struct sctp_association *asoc;
303 
304 	asoc = kzalloc(sizeof(*asoc), gfp);
305 	if (!asoc)
306 		goto fail;
307 
308 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
309 		goto fail_init;
310 
311 	SCTP_DBG_OBJCNT_INC(assoc);
312 
313 	pr_debug("Created asoc %p\n", asoc);
314 
315 	return asoc;
316 
317 fail_init:
318 	kfree(asoc);
319 fail:
320 	return NULL;
321 }
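
/* Usage sketch (illustrative only, not code from this file): callers pair
 * sctp_association_new() with sctp_association_free() and let the final
 * sctp_association_put() trigger sctp_association_destroy():
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *	...
 *	sctp_association_free(asoc);
 */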
322 
323 /* Free this association if possible.  There may still be users, so
324  * the actual deallocation may be delayed.
325  */
326 void sctp_association_free(struct sctp_association *asoc)
327 {
328 	struct sock *sk = asoc->base.sk;
329 	struct sctp_transport *transport;
330 	struct list_head *pos, *temp;
331 	int i;
332 
333 	/* Only real associations count against the endpoint, so
334 	 * don't bother if this is a temporary association.
335 	 */
336 	if (!list_empty(&asoc->asocs)) {
337 		list_del(&asoc->asocs);
338 
339 		/* Decrement the backlog value for a TCP-style listening
340 		 * socket.
341 		 */
342 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
343 			sk->sk_ack_backlog--;
344 	}
345 
346 	/* Mark as dead, so other users can know this structure is
347 	 * going away.
348 	 */
349 	asoc->base.dead = true;
350 
351 	/* Dispose of any data lying around in the outqueue. */
352 	sctp_outq_free(&asoc->outqueue);
353 
354 	/* Dispose of any pending messages for the upper layer. */
355 	sctp_ulpq_free(&asoc->ulpq);
356 
357 	/* Dispose of any pending chunks on the inqueue. */
358 	sctp_inq_free(&asoc->base.inqueue);
359 
360 	sctp_tsnmap_free(&asoc->peer.tsn_map);
361 
362 	/* Free stream information. */
363 	sctp_stream_free(asoc->stream);
364 
365 	if (asoc->strreset_chunk)
366 		sctp_chunk_free(asoc->strreset_chunk);
367 
368 	/* Clean up the bound address list. */
369 	sctp_bind_addr_free(&asoc->base.bind_addr);
370 
371 	/* Do we need to go through all of our timers and
372 	 * delete them?   To be safe we will try to delete all, but we
373 	 * should be able to go through and make a guess based
374 	 * on our state.
375 	 */
376 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
377 		if (del_timer(&asoc->timers[i]))
378 			sctp_association_put(asoc);
379 	}
380 
381 	/* Free peer's cached cookie. */
382 	kfree(asoc->peer.cookie);
383 	kfree(asoc->peer.peer_random);
384 	kfree(asoc->peer.peer_chunks);
385 	kfree(asoc->peer.peer_hmacs);
386 
387 	/* Release the transport structures. */
388 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
389 		transport = list_entry(pos, struct sctp_transport, transports);
390 		list_del_rcu(pos);
391 		sctp_unhash_transport(transport);
392 		sctp_transport_free(transport);
393 	}
394 
395 	asoc->peer.transport_count = 0;
396 
397 	sctp_asconf_queue_teardown(asoc);
398 
399 	/* Free pending address space being deleted */
400 	kfree(asoc->asconf_addr_del_pending);
401 
402 	/* AUTH - Free the endpoint shared keys */
403 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
404 
405 	/* AUTH - Free the association shared key */
406 	sctp_auth_key_put(asoc->asoc_shared_key);
407 
408 	sctp_association_put(asoc);
409 }
410 
411 /* Cleanup and free up an association. */
412 static void sctp_association_destroy(struct sctp_association *asoc)
413 {
414 	if (unlikely(!asoc->base.dead)) {
415 		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
416 		return;
417 	}
418 
419 	sctp_endpoint_put(asoc->ep);
420 	sock_put(asoc->base.sk);
421 
422 	if (asoc->assoc_id != 0) {
423 		spin_lock_bh(&sctp_assocs_id_lock);
424 		idr_remove(&sctp_assocs_id, asoc->assoc_id);
425 		spin_unlock_bh(&sctp_assocs_id_lock);
426 	}
427 
428 	WARN_ON(atomic_read(&asoc->rmem_alloc));
429 
430 	kfree(asoc);
431 	SCTP_DBG_OBJCNT_DEC(assoc);
432 }
433 
434 /* Change the primary destination address for the peer. */
435 void sctp_assoc_set_primary(struct sctp_association *asoc,
436 			    struct sctp_transport *transport)
437 {
438 	int changeover = 0;
439 
440 	/* it's a changeover only if we already have a primary path
441 	 * that we are changing
442 	 */
443 	if (asoc->peer.primary_path != NULL &&
444 	    asoc->peer.primary_path != transport)
445 		changeover = 1;
446 
447 	asoc->peer.primary_path = transport;
448 
449 	/* Set a default msg_name for events. */
450 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
451 	       sizeof(union sctp_addr));
452 
453 	/* If the primary path is changing, assume that the
454 	 * user wants to use this new path.
455 	 */
456 	if ((transport->state == SCTP_ACTIVE) ||
457 	    (transport->state == SCTP_UNKNOWN))
458 		asoc->peer.active_path = transport;
459 
460 	/*
461 	 * SFR-CACC algorithm:
462 	 * Upon the receipt of a request to change the primary
463 	 * destination address, on the data structure for the new
464 	 * primary destination, the sender MUST do the following:
465 	 *
466 	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
467 	 * to this destination address earlier. The sender MUST set
468 	 * CYCLING_CHANGEOVER to indicate that this switch is a
469 	 * double switch to the same destination address.
470 	 *
471 	 * Really, only bother if we have data queued or outstanding on
472 	 * the association.
473 	 */
474 	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
475 		return;
476 
477 	if (transport->cacc.changeover_active)
478 		transport->cacc.cycling_changeover = changeover;
479 
480 	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
481 	 * a changeover has occurred.
482 	 */
483 	transport->cacc.changeover_active = changeover;
484 
485 	/* 3) The sender MUST store the next TSN to be sent in
486 	 * next_tsn_at_change.
487 	 */
488 	transport->cacc.next_tsn_at_change = asoc->next_tsn;
489 }
490 
491 /* Remove a transport from an association.  */
492 void sctp_assoc_rm_peer(struct sctp_association *asoc,
493 			struct sctp_transport *peer)
494 {
495 	struct list_head	*pos;
496 	struct sctp_transport	*transport;
497 
498 	pr_debug("%s: association:%p addr:%pISpc\n",
499 		 __func__, asoc, &peer->ipaddr.sa);
500 
501 	/* If we are to remove the current retran_path, update it
502 	 * to the next peer before removing this peer from the list.
503 	 */
504 	if (asoc->peer.retran_path == peer)
505 		sctp_assoc_update_retran_path(asoc);
506 
507 	/* Remove this peer from the list. */
508 	list_del_rcu(&peer->transports);
509 	/* Remove this peer from the transport hashtable */
510 	sctp_unhash_transport(peer);
511 
512 	/* Get the first transport of asoc. */
513 	pos = asoc->peer.transport_addr_list.next;
514 	transport = list_entry(pos, struct sctp_transport, transports);
515 
516 	/* Update any entries that match the peer to be deleted. */
517 	if (asoc->peer.primary_path == peer)
518 		sctp_assoc_set_primary(asoc, transport);
519 	if (asoc->peer.active_path == peer)
520 		asoc->peer.active_path = transport;
521 	if (asoc->peer.retran_path == peer)
522 		asoc->peer.retran_path = transport;
523 	if (asoc->peer.last_data_from == peer)
524 		asoc->peer.last_data_from = transport;
525 
526 	if (asoc->strreset_chunk &&
527 	    asoc->strreset_chunk->transport == peer) {
528 		asoc->strreset_chunk->transport = transport;
529 		sctp_transport_reset_reconf_timer(transport);
530 	}
531 
532 	/* If we remove the transport an INIT was last sent to, set it to
533 	 * NULL. Combined with the update of the retran path above, this
534 	 * will cause the next INIT to be sent to the next available
535 	 * transport, maintaining the cycle.
536 	 */
537 	if (asoc->init_last_sent_to == peer)
538 		asoc->init_last_sent_to = NULL;
539 
540 	/* If we remove the transport an SHUTDOWN was last sent to, set it
541 	 * to NULL. Combined with the update of the retran path above, this
542 	 * will cause the next SHUTDOWN to be sent to the next available
543 	 * transport, maintaining the cycle.
544 	 */
545 	if (asoc->shutdown_last_sent_to == peer)
546 		asoc->shutdown_last_sent_to = NULL;
547 
548 	/* If we remove the transport an ASCONF was last sent to, set it to
549 	 * NULL.
550 	 */
551 	if (asoc->addip_last_asconf &&
552 	    asoc->addip_last_asconf->transport == peer)
553 		asoc->addip_last_asconf->transport = NULL;
554 
555 	/* If we have something on the transmitted list, we have to
556 	 * save it off.  The best place is the active path.
557 	 */
558 	if (!list_empty(&peer->transmitted)) {
559 		struct sctp_transport *active = asoc->peer.active_path;
560 		struct sctp_chunk *ch;
561 
562 		/* Reset the transport of each chunk on this list */
563 		list_for_each_entry(ch, &peer->transmitted,
564 					transmitted_list) {
565 			ch->transport = NULL;
566 			ch->rtt_in_progress = 0;
567 		}
568 
569 		list_splice_tail_init(&peer->transmitted,
570 					&active->transmitted);
571 
572 		/* Start a T3 timer here in case it wasn't running so
573 		 * that these migrated packets have a chance to get
574 		 * retransmitted.
575 		 */
576 		if (!timer_pending(&active->T3_rtx_timer))
577 			if (!mod_timer(&active->T3_rtx_timer,
578 					jiffies + active->rto))
579 				sctp_transport_hold(active);
580 	}
581 
582 	asoc->peer.transport_count--;
583 
584 	sctp_transport_free(peer);
585 }
586 
587 /* Add a transport address to an association.  */
588 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
589 					   const union sctp_addr *addr,
590 					   const gfp_t gfp,
591 					   const int peer_state)
592 {
593 	struct net *net = sock_net(asoc->base.sk);
594 	struct sctp_transport *peer;
595 	struct sctp_sock *sp;
596 	unsigned short port;
597 
598 	sp = sctp_sk(asoc->base.sk);
599 
600 	/* AF_INET and AF_INET6 share common port field. */
601 	port = ntohs(addr->v4.sin_port);
602 
603 	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
604 		 asoc, &addr->sa, peer_state);
605 
606 	/* Set the port if it has not been set yet.  */
607 	if (0 == asoc->peer.port)
608 		asoc->peer.port = port;
609 
610 	/* Check to see if this is a duplicate. */
611 	peer = sctp_assoc_lookup_paddr(asoc, addr);
612 	if (peer) {
613 		/* An UNKNOWN state is only set on transports added by
614 		 * user in sctp_connectx() call.  Such transports should be
615 		 * considered CONFIRMED per RFC 4960, Section 5.4.
616 		 */
617 		if (peer->state == SCTP_UNKNOWN) {
618 			peer->state = SCTP_ACTIVE;
619 		}
620 		return peer;
621 	}
622 
623 	peer = sctp_transport_new(net, addr, gfp);
624 	if (!peer)
625 		return NULL;
626 
627 	sctp_transport_set_owner(peer, asoc);
628 
629 	/* Initialize the peer's heartbeat interval based on the
630 	 * association configured value.
631 	 */
632 	peer->hbinterval = asoc->hbinterval;
633 
634 	/* Set the path max_retrans.  */
635 	peer->pathmaxrxt = asoc->pathmaxrxt;
636 
637 	/* And the partial failure retrans threshold */
638 	peer->pf_retrans = asoc->pf_retrans;
639 
640 	/* Initialize the peer's SACK delay timeout based on the
641 	 * association configured value.
642 	 */
643 	peer->sackdelay = asoc->sackdelay;
644 	peer->sackfreq = asoc->sackfreq;
645 
646 	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
647 	 * based on association setting.
648 	 */
649 	peer->param_flags = asoc->param_flags;
650 
651 	sctp_transport_route(peer, NULL, sp);
652 
653 	/* Initialize the pmtu of the transport. */
654 	if (peer->param_flags & SPP_PMTUD_DISABLE) {
655 		if (asoc->pathmtu)
656 			peer->pathmtu = asoc->pathmtu;
657 		else
658 			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
659 	}
660 
661 	/* If this is the first transport addr on this association,
662 	 * initialize the association PMTU to the peer's PMTU.
663 	 * If not and the current association PMTU is higher than the new
664 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
665 	 */
666 	if (asoc->pathmtu)
667 		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
668 	else
669 		asoc->pathmtu = peer->pathmtu;
670 
671 	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
672 		 asoc->pathmtu);
673 
674 	peer->pmtu_pending = 0;
675 
676 	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
677 
678 	/* The asoc->peer.port might not be meaningful yet, but
679 	 * initialize the packet structure anyway.
680 	 */
681 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
682 			 asoc->peer.port);
683 
684 	/* 7.2.1 Slow-Start
685 	 *
686 	 * o The initial cwnd before DATA transmission or after a sufficiently
687 	 *   long idle period MUST be set to
688 	 *      min(4*MTU, max(2*MTU, 4380 bytes))
689 	 *
690 	 * o The initial value of ssthresh MAY be arbitrarily high
691 	 *   (for example, implementations MAY use the size of the
692 	 *   receiver advertised window).
693 	 */
694 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
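	/* Worked example (a 1500-byte path MTU is assumed here, not guaranteed
	 * by this file): min(4 * 1500, max(2 * 1500, 4380)) =
	 * min(6000, 4380) = 4380 bytes of initial cwnd.
	 */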
695 
696 	/* At this point, we may not have the receiver's advertised window,
697 	 * so initialize ssthresh to the default value and it will be set
698 	 * later when we process the INIT.
699 	 */
700 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
701 
702 	peer->partial_bytes_acked = 0;
703 	peer->flight_size = 0;
704 	peer->burst_limited = 0;
705 
706 	/* Set the transport's RTO.initial value */
707 	peer->rto = asoc->rto_initial;
708 	sctp_max_rto(asoc, peer);
709 
710 	/* Set the peer's active state. */
711 	peer->state = peer_state;
712 
713 	/* Add this peer into the transport hashtable */
714 	if (sctp_hash_transport(peer)) {
715 		sctp_transport_free(peer);
716 		return NULL;
717 	}
718 
719 	/* Attach the remote transport to our asoc.  */
720 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
721 	asoc->peer.transport_count++;
722 
723 	/* If we do not yet have a primary path, set one.  */
724 	if (!asoc->peer.primary_path) {
725 		sctp_assoc_set_primary(asoc, peer);
726 		asoc->peer.retran_path = peer;
727 	}
728 
729 	if (asoc->peer.active_path == asoc->peer.retran_path &&
730 	    peer->state != SCTP_UNCONFIRMED) {
731 		asoc->peer.retran_path = peer;
732 	}
733 
734 	return peer;
735 }
736 
737 /* Delete a transport address from an association.  */
738 void sctp_assoc_del_peer(struct sctp_association *asoc,
739 			 const union sctp_addr *addr)
740 {
741 	struct list_head	*pos;
742 	struct list_head	*temp;
743 	struct sctp_transport	*transport;
744 
745 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
746 		transport = list_entry(pos, struct sctp_transport, transports);
747 		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
748 			/* Do book keeping for removing the peer and free it. */
749 			sctp_assoc_rm_peer(asoc, transport);
750 			break;
751 		}
752 	}
753 }
754 
755 /* Lookup a transport by address. */
756 struct sctp_transport *sctp_assoc_lookup_paddr(
757 					const struct sctp_association *asoc,
758 					const union sctp_addr *address)
759 {
760 	struct sctp_transport *t;
761 
762 	/* Cycle through all transports searching for a peer address. */
763 
764 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
765 			transports) {
766 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
767 			return t;
768 	}
769 
770 	return NULL;
771 }
772 
773 /* Remove all transports except a given one. */
774 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
775 				     struct sctp_transport *primary)
776 {
777 	struct sctp_transport	*temp;
778 	struct sctp_transport	*t;
779 
780 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
781 				 transports) {
782 		/* if the current transport is not the primary one, delete it */
783 		if (t != primary)
784 			sctp_assoc_rm_peer(asoc, t);
785 	}
786 }
787 
788 /* Engage in transport control operations.
789  * Mark the transport up or down and send a notification to the user.
790  * Select and update the new active and retran paths.
791  */
792 void sctp_assoc_control_transport(struct sctp_association *asoc,
793 				  struct sctp_transport *transport,
794 				  sctp_transport_cmd_t command,
795 				  sctp_sn_error_t error)
796 {
797 	struct sctp_ulpevent *event;
798 	struct sockaddr_storage addr;
799 	int spc_state = 0;
800 	bool ulp_notify = true;
801 
802 	/* Record the transition on the transport.  */
803 	switch (command) {
804 	case SCTP_TRANSPORT_UP:
805 		/* If we are moving from UNCONFIRMED state due
806 		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
807 		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
808 		 */
809 		if (SCTP_UNCONFIRMED == transport->state &&
810 		    SCTP_HEARTBEAT_SUCCESS == error)
811 			spc_state = SCTP_ADDR_CONFIRMED;
812 		else
813 			spc_state = SCTP_ADDR_AVAILABLE;
814 		/* Don't inform ULP about transition from PF to
815 		 * active state and set cwnd to 1 MTU, see SCTP
816 		 * Quick failover draft section 5.1, point 5
817 		 */
818 		if (transport->state == SCTP_PF) {
819 			ulp_notify = false;
820 			transport->cwnd = asoc->pathmtu;
821 		}
822 		transport->state = SCTP_ACTIVE;
823 		break;
824 
825 	case SCTP_TRANSPORT_DOWN:
826 		/* If the transport was never confirmed, do not transition it
827 		 * to inactive state.  Also, release the cached route since
828 		 * there may be a better route next time.
829 		 */
830 		if (transport->state != SCTP_UNCONFIRMED)
831 			transport->state = SCTP_INACTIVE;
832 		else {
833 			sctp_transport_dst_release(transport);
834 			ulp_notify = false;
835 		}
836 
837 		spc_state = SCTP_ADDR_UNREACHABLE;
838 		break;
839 
840 	case SCTP_TRANSPORT_PF:
841 		transport->state = SCTP_PF;
842 		ulp_notify = false;
843 		break;
844 
845 	default:
846 		return;
847 	}
848 
849 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
850 	 * to the user.
851 	 */
852 	if (ulp_notify) {
853 		memset(&addr, 0, sizeof(struct sockaddr_storage));
854 		memcpy(&addr, &transport->ipaddr,
855 		       transport->af_specific->sockaddr_len);
856 
857 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
858 					0, spc_state, error, GFP_ATOMIC);
859 		if (event)
860 			sctp_ulpq_tail_event(&asoc->ulpq, event);
861 	}
862 
863 	/* Select new active and retran paths. */
864 	sctp_select_active_and_retran_path(asoc);
865 }
866 
867 /* Hold a reference to an association. */
868 void sctp_association_hold(struct sctp_association *asoc)
869 {
870 	atomic_inc(&asoc->base.refcnt);
871 }
872 
873 /* Release a reference to an association and cleanup
874  * if there are no more references.
875  */
876 void sctp_association_put(struct sctp_association *asoc)
877 {
878 	if (atomic_dec_and_test(&asoc->base.refcnt))
879 		sctp_association_destroy(asoc);
880 }
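
/* Reference counting sketch (summarizing this file): the initial reference
 * taken in sctp_association_init() is dropped at the end of
 * sctp_association_free(); timers and the receive path take transient
 * references via sctp_association_hold() and release them with
 * sctp_association_put(), and the last put calls sctp_association_destroy().
 */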
881 
882 /* Allocate the next TSN, Transmission Sequence Number, for the given
883  * association.
884  */
885 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
886 {
887 	/* From Section 1.6 Serial Number Arithmetic:
888 	 * Transmission Sequence Numbers wrap around when they reach
889 	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
890 	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
891 	 */
892 	__u32 retval = asoc->next_tsn;
893 	asoc->next_tsn++;
894 	asoc->unack_data++;
895 
896 	return retval;
897 }
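
/* Because next_tsn is an unsigned 32-bit counter, the wrap required by serial
 * number arithmetic falls out of the increment above: after handing out
 * TSN 0xffffffff, the next call returns TSN 0.
 */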
898 
899 /* Compare two addresses to see if they match.  Wildcard addresses
900  * only match themselves.
901  */
902 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
903 			const union sctp_addr *ss2)
904 {
905 	struct sctp_af *af;
906 
907 	af = sctp_get_af_specific(ss1->sa.sa_family);
908 	if (unlikely(!af))
909 		return 0;
910 
911 	return af->cmp_addr(ss1, ss2);
912 }
913 
914 /* Return an ecne chunk to get prepended to a packet.
915  * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
916  * No we don't, but we could/should.
917  */
918 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
919 {
920 	if (!asoc->need_ecne)
921 		return NULL;
922 
923 	/* Send ECNE if needed.
924 	 * Not being able to allocate a chunk here is not deadly.
925 	 */
926 	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
927 }
928 
929 /*
930  * Find which transport this TSN was sent on.
931  */
932 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
933 					     __u32 tsn)
934 {
935 	struct sctp_transport *active;
936 	struct sctp_transport *match;
937 	struct sctp_transport *transport;
938 	struct sctp_chunk *chunk;
939 	__be32 key = htonl(tsn);
940 
941 	match = NULL;
942 
943 	/*
944 	 * FIXME: In general, find a more efficient data structure for
945 	 * searching.
946 	 */
947 
948 	/*
949 	 * The general strategy is to search each transport's transmitted
950 	 * list.   Return which transport this TSN lives on.
951 	 *
952 	 * Let's be hopeful and check the active_path first.
953 	 * Another optimization would be to know if there is only one
954 	 * outbound path and not have to look for the TSN at all.
955 	 *
956 	 */
957 
958 	active = asoc->peer.active_path;
959 
960 	list_for_each_entry(chunk, &active->transmitted,
961 			transmitted_list) {
962 
963 		if (key == chunk->subh.data_hdr->tsn) {
964 			match = active;
965 			goto out;
966 		}
967 	}
968 
969 	/* If not found, go search all the other transports. */
970 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
971 			transports) {
972 
973 		if (transport == active)
974 			continue;
975 		list_for_each_entry(chunk, &transport->transmitted,
976 				transmitted_list) {
977 			if (key == chunk->subh.data_hdr->tsn) {
978 				match = transport;
979 				goto out;
980 			}
981 		}
982 	}
983 out:
984 	return match;
985 }
986 
987 /* Is this the association we are looking for? */
988 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
989 					   struct net *net,
990 					   const union sctp_addr *laddr,
991 					   const union sctp_addr *paddr)
992 {
993 	struct sctp_transport *transport;
994 
995 	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
996 	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
997 	    net_eq(sock_net(asoc->base.sk), net)) {
998 		transport = sctp_assoc_lookup_paddr(asoc, paddr);
999 		if (!transport)
1000 			goto out;
1001 
1002 		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1003 					 sctp_sk(asoc->base.sk)))
1004 			goto out;
1005 	}
1006 	transport = NULL;
1007 
1008 out:
1009 	return transport;
1010 }
1011 
1012 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
1013 static void sctp_assoc_bh_rcv(struct work_struct *work)
1014 {
1015 	struct sctp_association *asoc =
1016 		container_of(work, struct sctp_association,
1017 			     base.inqueue.immediate);
1018 	struct net *net = sock_net(asoc->base.sk);
1019 	struct sctp_endpoint *ep;
1020 	struct sctp_chunk *chunk;
1021 	struct sctp_inq *inqueue;
1022 	int state;
1023 	sctp_subtype_t subtype;
1024 	int error = 0;
1025 
1026 	/* The association should be held so we should be safe. */
1027 	ep = asoc->ep;
1028 
1029 	inqueue = &asoc->base.inqueue;
1030 	sctp_association_hold(asoc);
1031 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1032 		state = asoc->state;
1033 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1034 
1035 		/* SCTP-AUTH, Section 6.3:
1036 		 *    The receiver has a list of chunk types which it expects
1037 		 *    to be received only after an AUTH-chunk.  This list has
1038 		 *    been sent to the peer during the association setup.  It
1039 		 *    MUST silently discard these chunks if they are not placed
1040 		 *    after an AUTH chunk in the packet.
1041 		 */
1042 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1043 			continue;
1044 
1045 		/* Remember where the last DATA chunk came from so we
1046 		 * know where to send the SACK.
1047 		 */
1048 		if (sctp_chunk_is_data(chunk))
1049 			asoc->peer.last_data_from = chunk->transport;
1050 		else {
1051 			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1052 			asoc->stats.ictrlchunks++;
1053 			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1054 				asoc->stats.isacks++;
1055 		}
1056 
1057 		if (chunk->transport)
1058 			chunk->transport->last_time_heard = ktime_get();
1059 
1060 		/* Run through the state machine. */
1061 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1062 				   state, ep, asoc, chunk, GFP_ATOMIC);
1063 
1064 		/* Check to see if the association is freed in response to
1065 		 * the incoming chunk.  If so, get out of the while loop.
1066 		 */
1067 		if (asoc->base.dead)
1068 			break;
1069 
1070 		/* If there is an error on chunk, discard this packet. */
1071 		if (error && chunk)
1072 			chunk->pdiscard = 1;
1073 	}
1074 	sctp_association_put(asoc);
1075 }
1076 
1077 /* This routine moves an association from its old sk to a new sk.  */
1078 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1079 {
1080 	struct sctp_sock *newsp = sctp_sk(newsk);
1081 	struct sock *oldsk = assoc->base.sk;
1082 
1083 	/* Delete the association from the old endpoint's list of
1084 	 * associations.
1085 	 */
1086 	list_del_init(&assoc->asocs);
1087 
1088 	/* Decrement the backlog value for a TCP-style socket. */
1089 	if (sctp_style(oldsk, TCP))
1090 		oldsk->sk_ack_backlog--;
1091 
1092 	/* Release references to the old endpoint and the sock.  */
1093 	sctp_endpoint_put(assoc->ep);
1094 	sock_put(assoc->base.sk);
1095 
1096 	/* Get a reference to the new endpoint.  */
1097 	assoc->ep = newsp->ep;
1098 	sctp_endpoint_hold(assoc->ep);
1099 
1100 	/* Get a reference to the new sock.  */
1101 	assoc->base.sk = newsk;
1102 	sock_hold(assoc->base.sk);
1103 
1104 	/* Add the association to the new endpoint's list of associations.  */
1105 	sctp_endpoint_add_asoc(newsp->ep, assoc);
1106 }
1107 
1108 /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1109 void sctp_assoc_update(struct sctp_association *asoc,
1110 		       struct sctp_association *new)
1111 {
1112 	struct sctp_transport *trans;
1113 	struct list_head *pos, *temp;
1114 
1115 	/* Copy in new parameters of peer. */
1116 	asoc->c = new->c;
1117 	asoc->peer.rwnd = new->peer.rwnd;
1118 	asoc->peer.sack_needed = new->peer.sack_needed;
1119 	asoc->peer.auth_capable = new->peer.auth_capable;
1120 	asoc->peer.i = new->peer.i;
1121 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1122 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1123 
1124 	/* Remove any peer addresses not present in the new association. */
1125 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1126 		trans = list_entry(pos, struct sctp_transport, transports);
1127 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1128 			sctp_assoc_rm_peer(asoc, trans);
1129 			continue;
1130 		}
1131 
1132 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1133 			sctp_transport_reset(trans);
1134 	}
1135 
1136 	/* If the case is A (association restart), use
1137 	 * initial_tsn as next_tsn. If the case is B, use
1138 	 * current next_tsn in case data sent to peer
1139 	 * has been discarded and needs retransmission.
1140 	 */
1141 	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1142 		asoc->next_tsn = new->next_tsn;
1143 		asoc->ctsn_ack_point = new->ctsn_ack_point;
1144 		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1145 
1146 		/* Reinitialize SSN for both local streams
1147 		 * and peer's streams.
1148 		 */
1149 		sctp_stream_clear(asoc->stream);
1150 
1151 		/* Flush the ULP reassembly and ordered queue.
1152 		 * Any data there will now be stale and will
1153 		 * cause problems.
1154 		 */
1155 		sctp_ulpq_flush(&asoc->ulpq);
1156 
1157 		/* reset the overall association error count so
1158 		 * that the restarted association doesn't get torn
1159 		 * down on the next retransmission timer.
1160 		 */
1161 		asoc->overall_error_count = 0;
1162 
1163 	} else {
1164 		/* Add any peer addresses from the new association. */
1165 		list_for_each_entry(trans, &new->peer.transport_addr_list,
1166 				transports) {
1167 			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1168 				sctp_assoc_add_peer(asoc, &trans->ipaddr,
1169 						    GFP_ATOMIC, trans->state);
1170 		}
1171 
1172 		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1173 		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1174 		if (!asoc->stream) {
1175 			asoc->stream = new->stream;
1176 			new->stream = NULL;
1177 		}
1178 
1179 		if (!asoc->assoc_id) {
1180 			/* get a new association id since we don't have one
1181 			 * yet.
1182 			 */
1183 			sctp_assoc_set_id(asoc, GFP_ATOMIC);
1184 		}
1185 	}
1186 
1187 	/* SCTP-AUTH: Save the peer parameters from the new associations
1188 	 * and also move the association shared keys over
1189 	 */
1190 	kfree(asoc->peer.peer_random);
1191 	asoc->peer.peer_random = new->peer.peer_random;
1192 	new->peer.peer_random = NULL;
1193 
1194 	kfree(asoc->peer.peer_chunks);
1195 	asoc->peer.peer_chunks = new->peer.peer_chunks;
1196 	new->peer.peer_chunks = NULL;
1197 
1198 	kfree(asoc->peer.peer_hmacs);
1199 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1200 	new->peer.peer_hmacs = NULL;
1201 
1202 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1203 }
1204 
1205 /* Update the retran path for sending a retransmitted packet.
1206  * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1207  *
1208  *   When there is outbound data to send and the primary path
1209  *   becomes inactive (e.g., due to failures), or where the
1210  *   SCTP user explicitly requests to send data to an
1211  *   inactive destination transport address, before reporting
1212  *   an error to its ULP, the SCTP endpoint should try to send
1213  *   the data to an alternate active destination transport
1214  *   address if one exists.
1215  *
1216  *   When retransmitting data that timed out, if the endpoint
1217  *   is multihomed, it should consider each source-destination
1218  *   address pair in its retransmission selection policy.
1219  *   When retransmitting timed-out data, the endpoint should
1220  *   attempt to pick the most divergent source-destination
1221  *   pair from the original source-destination pair to which
1222  *   the packet was transmitted.
1223  *
1224  *   Note: Rules for picking the most divergent source-destination
1225  *   pair are an implementation decision and are not specified
1226  *   within this document.
1227  *
1228  * Our basic strategy is to round-robin transports in priorities
1229  * according to sctp_trans_score(), e.g., if no transport
1230  * with state SCTP_ACTIVE exists, round-robin through
1231  * SCTP_UNKNOWN, etc. You get the picture.
1232  */
1233 static u8 sctp_trans_score(const struct sctp_transport *trans)
1234 {
1235 	switch (trans->state) {
1236 	case SCTP_ACTIVE:
1237 		return 3;	/* best case */
1238 	case SCTP_UNKNOWN:
1239 		return 2;
1240 	case SCTP_PF:
1241 		return 1;
1242 	default: /* case SCTP_INACTIVE */
1243 		return 0;	/* worst case */
1244 	}
1245 }
1246 
1247 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1248 						   struct sctp_transport *trans2)
1249 {
1250 	if (trans1->error_count > trans2->error_count) {
1251 		return trans2;
1252 	} else if (trans1->error_count == trans2->error_count &&
1253 		   ktime_after(trans2->last_time_heard,
1254 			       trans1->last_time_heard)) {
1255 		return trans2;
1256 	} else {
1257 		return trans1;
1258 	}
1259 }
1260 
1261 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1262 						    struct sctp_transport *best)
1263 {
1264 	u8 score_curr, score_best;
1265 
1266 	if (best == NULL || curr == best)
1267 		return curr;
1268 
1269 	score_curr = sctp_trans_score(curr);
1270 	score_best = sctp_trans_score(best);
1271 
1272 	/* First, try a score-based selection if both transport states
1273 	 * differ. If we're in a tie, let's try to make a more clever
1274 	 * decision here based on error counts and last time heard.
1275 	 */
1276 	if (score_curr > score_best)
1277 		return curr;
1278 	else if (score_curr == score_best)
1279 		return sctp_trans_elect_tie(best, curr);
1280 	else
1281 		return best;
1282 }
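
/* Resulting preference order (illustrative): given one ACTIVE, one PF and one
 * INACTIVE transport, sctp_trans_elect_best() picks the ACTIVE one; between
 * two transports of equal score, the tie is broken by the lower error count
 * and then by the more recent last_time_heard.
 */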
1283 
1284 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1285 {
1286 	struct sctp_transport *trans = asoc->peer.retran_path;
1287 	struct sctp_transport *trans_next = NULL;
1288 
1289 	/* We're done as we only have the one and only path. */
1290 	if (asoc->peer.transport_count == 1)
1291 		return;
1292 	/* If active_path and retran_path are the same and active,
1293 	 * then this is the only active path. Use it.
1294 	 */
1295 	if (asoc->peer.active_path == asoc->peer.retran_path &&
1296 	    asoc->peer.active_path->state == SCTP_ACTIVE)
1297 		return;
1298 
1299 	/* Iterate from retran_path's successor back to retran_path. */
1300 	for (trans = list_next_entry(trans, transports); 1;
1301 	     trans = list_next_entry(trans, transports)) {
1302 		/* Manually skip the head element. */
1303 		if (&trans->transports == &asoc->peer.transport_addr_list)
1304 			continue;
1305 		if (trans->state == SCTP_UNCONFIRMED)
1306 			continue;
1307 		trans_next = sctp_trans_elect_best(trans, trans_next);
1308 		/* Active is good enough for immediate return. */
1309 		if (trans_next->state == SCTP_ACTIVE)
1310 			break;
1311 		/* We've reached the end, time to update path. */
1312 		if (trans == asoc->peer.retran_path)
1313 			break;
1314 	}
1315 
1316 	asoc->peer.retran_path = trans_next;
1317 
1318 	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1319 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1320 }
1321 
1322 static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1323 {
1324 	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1325 	struct sctp_transport *trans_pf = NULL;
1326 
1327 	/* Look for the two most recently used active transports. */
1328 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1329 			    transports) {
1330 		/* Skip uninteresting transports. */
1331 		if (trans->state == SCTP_INACTIVE ||
1332 		    trans->state == SCTP_UNCONFIRMED)
1333 			continue;
1334 		/* Keep track of the best PF transport from our
1335 		 * list in case we don't find an active one.
1336 		 */
1337 		if (trans->state == SCTP_PF) {
1338 			trans_pf = sctp_trans_elect_best(trans, trans_pf);
1339 			continue;
1340 		}
1341 		/* For active transports, pick the most recent ones. */
1342 		if (trans_pri == NULL ||
1343 		    ktime_after(trans->last_time_heard,
1344 				trans_pri->last_time_heard)) {
1345 			trans_sec = trans_pri;
1346 			trans_pri = trans;
1347 		} else if (trans_sec == NULL ||
1348 			   ktime_after(trans->last_time_heard,
1349 				       trans_sec->last_time_heard)) {
1350 			trans_sec = trans;
1351 		}
1352 	}
1353 
1354 	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1355 	 *
1356 	 * By default, an endpoint should always transmit to the primary
1357 	 * path, unless the SCTP user explicitly specifies the
1358 	 * destination transport address (and possibly source transport
1359 	 * address) to use. [If the primary is active but not most recent,
1360 	 * bump the most recently used transport.]
1361 	 */
1362 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1363 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1364 	     asoc->peer.primary_path != trans_pri) {
1365 		trans_sec = trans_pri;
1366 		trans_pri = asoc->peer.primary_path;
1367 	}
1368 
1369 	/* We did not find anything useful for a possible retransmission
1370 	 * path; either the primary path that we found is the same as
1371 	 * the current one, or we didn't generally find an active one.
1372 	 */
1373 	if (trans_sec == NULL)
1374 		trans_sec = trans_pri;
1375 
1376 	/* If we failed to find a usable transport, just camp on the
1377 	 * active or pick a PF iff it's the better choice.
1378 	 */
1379 	if (trans_pri == NULL) {
1380 		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1381 		trans_sec = trans_pri;
1382 	}
1383 
1384 	/* Set the active and retran transports. */
1385 	asoc->peer.active_path = trans_pri;
1386 	asoc->peer.retran_path = trans_sec;
1387 }
1388 
1389 struct sctp_transport *
1390 sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1391 				  struct sctp_transport *last_sent_to)
1392 {
1393 	/* If this is the first time a packet is sent, use the active path,
1394 	 * else use the retran path. If the last packet was sent over the
1395 	 * retran path, update the retran path and use it.
1396 	 */
1397 	if (last_sent_to == NULL) {
1398 		return asoc->peer.active_path;
1399 	} else {
1400 		if (last_sent_to == asoc->peer.retran_path)
1401 			sctp_assoc_update_retran_path(asoc);
1402 
1403 		return asoc->peer.retran_path;
1404 	}
1405 }
1406 
1407 /* Update the association's pmtu and frag_point by going through all the
1408  * transports. This routine is called when a transport's PMTU has changed.
1409  */
1410 void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1411 {
1412 	struct sctp_transport *t;
1413 	__u32 pmtu = 0;
1414 
1415 	if (!asoc)
1416 		return;
1417 
1418 	/* Get the lowest pmtu of all the transports. */
1419 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
1420 				transports) {
1421 		if (t->pmtu_pending && t->dst) {
1422 			sctp_transport_update_pmtu(sk, t,
1423 						   SCTP_TRUNC4(dst_mtu(t->dst)));
1424 			t->pmtu_pending = 0;
1425 		}
1426 		if (!pmtu || (t->pathmtu < pmtu))
1427 			pmtu = t->pathmtu;
1428 	}
1429 
1430 	if (pmtu) {
1431 		asoc->pathmtu = pmtu;
1432 		asoc->frag_point = sctp_frag_point(asoc, pmtu);
1433 	}
1434 
1435 	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1436 		 asoc->pathmtu, asoc->frag_point);
1437 }
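
/* Worked example (the MTU values are assumed, not taken from this file): with
 * two transports whose path MTUs are 1500 and 1280 bytes, the loop above picks
 * the lower value, so asoc->pathmtu becomes 1280 and frag_point is recomputed
 * from it.
 */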
1438 
1439 /* Should we send a SACK to update our peer? */
1440 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1441 {
1442 	struct net *net = sock_net(asoc->base.sk);
1443 	switch (asoc->state) {
1444 	case SCTP_STATE_ESTABLISHED:
1445 	case SCTP_STATE_SHUTDOWN_PENDING:
1446 	case SCTP_STATE_SHUTDOWN_RECEIVED:
1447 	case SCTP_STATE_SHUTDOWN_SENT:
1448 		if ((asoc->rwnd > asoc->a_rwnd) &&
1449 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1450 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1451 			   asoc->pathmtu)))
1452 			return true;
1453 		break;
1454 	default:
1455 		break;
1456 	}
1457 	return false;
1458 }
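
/* Worked example (the buffer size and shift are assumed typical values, not
 * taken from this file): with sk_rcvbuf = 212992 and rwnd_upd_shift = 4, a
 * window update is warranted once rwnd exceeds a_rwnd by at least
 * max(212992 >> 4, pathmtu) = max(13312, pathmtu) bytes.
 */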
1459 
1460 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1461 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1462 {
1463 	struct sctp_chunk *sack;
1464 	struct timer_list *timer;
1465 
1466 	if (asoc->rwnd_over) {
1467 		if (asoc->rwnd_over >= len) {
1468 			asoc->rwnd_over -= len;
1469 		} else {
1470 			asoc->rwnd += (len - asoc->rwnd_over);
1471 			asoc->rwnd_over = 0;
1472 		}
1473 	} else {
1474 		asoc->rwnd += len;
1475 	}
1476 
1477 	/* If we had window pressure, start recovering it
1478 	 * once our rwnd has reached the accumulated pressure
1479 	 * threshold.  The idea is to recover slowly, but up
1480 	 * to the initial advertised window.
1481 	 */
1482 	if (asoc->rwnd_press) {
1483 		int change = min(asoc->pathmtu, asoc->rwnd_press);
1484 		asoc->rwnd += change;
1485 		asoc->rwnd_press -= change;
1486 	}
1487 
1488 	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1489 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1490 		 asoc->a_rwnd);
1491 
1492 	/* Send a window update SACK if the rwnd has increased by at least the
1493 	 * larger of the association's PMTU and a configured fraction of the
1494 	 * receive buffer (see sctp_peer_needs_update() above).  The algorithm
1495 	 * used is similar to the one described in Section 4.2.3.3 of RFC 1122.
1496 	 */
1497 	if (sctp_peer_needs_update(asoc)) {
1498 		asoc->a_rwnd = asoc->rwnd;
1499 
1500 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1501 			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1502 			 asoc->a_rwnd);
1503 
1504 		sack = sctp_make_sack(asoc);
1505 		if (!sack)
1506 			return;
1507 
1508 		asoc->peer.sack_needed = 0;
1509 
1510 		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
1511 
1512 		/* Stop the SACK timer.  */
1513 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1514 		if (del_timer(timer))
1515 			sctp_association_put(asoc);
1516 	}
1517 }
1518 
1519 /* Decrease asoc's rwnd by len. */
1520 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1521 {
1522 	int rx_count;
1523 	int over = 0;
1524 
1525 	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1526 		pr_debug("%s: association:%p has asoc->rwnd:%u, "
1527 			 "asoc->rwnd_over:%u!\n", __func__, asoc,
1528 			 asoc->rwnd, asoc->rwnd_over);
1529 
1530 	if (asoc->ep->rcvbuf_policy)
1531 		rx_count = atomic_read(&asoc->rmem_alloc);
1532 	else
1533 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1534 
1535 	/* If we've reached or overflowed our receive buffer, announce
1536 	 * a 0 rwnd if rwnd would still be positive.  Store the
1537 	 * potential pressure overflow so that the window can be restored
1538 	 * back to its original value.
1539 	 */
1540 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1541 		over = 1;
1542 
1543 	if (asoc->rwnd >= len) {
1544 		asoc->rwnd -= len;
1545 		if (over) {
1546 			asoc->rwnd_press += asoc->rwnd;
1547 			asoc->rwnd = 0;
1548 		}
1549 	} else {
1550 		asoc->rwnd_over += len - asoc->rwnd;
1551 		asoc->rwnd = 0;
1552 	}
1553 
1554 	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1555 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1556 		 asoc->rwnd_press);
1557 }
1558 
1559 /* Build the bind address list for the association based on info from the
1560  * local endpoint and the remote peer.
1561  */
1562 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1563 				     sctp_scope_t scope, gfp_t gfp)
1564 {
1565 	int flags;
1566 
1567 	/* Use scoping rules to determine the subset of addresses from
1568 	 * the endpoint.
1569 	 */
1570 	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1571 	if (asoc->peer.ipv4_address)
1572 		flags |= SCTP_ADDR4_PEERSUPP;
1573 	if (asoc->peer.ipv6_address)
1574 		flags |= SCTP_ADDR6_PEERSUPP;
1575 
1576 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1577 				   &asoc->base.bind_addr,
1578 				   &asoc->ep->base.bind_addr,
1579 				   scope, gfp, flags);
1580 }
1581 
1582 /* Build the association's bind address list from the cookie.  */
1583 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1584 					 struct sctp_cookie *cookie,
1585 					 gfp_t gfp)
1586 {
1587 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1588 	int var_size3 = cookie->raw_addr_list_len;
1589 	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1590 
1591 	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1592 				      asoc->ep->base.bind_addr.port, gfp);
1593 }
1594 
1595 /* Lookup laddr in the bind address list of an association. */
1596 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1597 			    const union sctp_addr *laddr)
1598 {
1599 	int found = 0;
1600 
1601 	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1602 	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1603 				 sctp_sk(asoc->base.sk)))
1604 		found = 1;
1605 
1606 	return found;
1607 }
1608 
1609 /* Set an association id for a given association */
1610 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1611 {
1612 	bool preload = gfpflags_allow_blocking(gfp);
1613 	int ret;
1614 
1615 	/* If the id is already assigned, keep it. */
1616 	if (asoc->assoc_id)
1617 		return 0;
1618 
1619 	if (preload)
1620 		idr_preload(gfp);
1621 	spin_lock_bh(&sctp_assocs_id_lock);
1622 	/* 0 is not a valid assoc_id, must be >= 1 */
1623 	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1624 	spin_unlock_bh(&sctp_assocs_id_lock);
1625 	if (preload)
1626 		idr_preload_end();
1627 	if (ret < 0)
1628 		return ret;
1629 
1630 	asoc->assoc_id = (sctp_assoc_t)ret;
1631 	return 0;
1632 }
1633 
1634 /* Free the ASCONF queue */
1635 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1636 {
1637 	struct sctp_chunk *asconf;
1638 	struct sctp_chunk *tmp;
1639 
1640 	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1641 		list_del_init(&asconf->list);
1642 		sctp_chunk_free(asconf);
1643 	}
1644 }
1645 
1646 /* Free asconf_ack cache */
1647 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1648 {
1649 	struct sctp_chunk *ack;
1650 	struct sctp_chunk *tmp;
1651 
1652 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1653 				transmitted_list) {
1654 		list_del_init(&ack->transmitted_list);
1655 		sctp_chunk_free(ack);
1656 	}
1657 }
1658 
1659 /* Clean up the ASCONF_ACK queue */
1660 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1661 {
1662 	struct sctp_chunk *ack;
1663 	struct sctp_chunk *tmp;
1664 
1665 	/* We can remove all the entries from the queue up to
1666 	 * the "Peer-Sequence-Number".
1667 	 */
1668 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1669 				transmitted_list) {
1670 		if (ack->subh.addip_hdr->serial ==
1671 				htonl(asoc->peer.addip_serial))
1672 			break;
1673 
1674 		list_del_init(&ack->transmitted_list);
1675 		sctp_chunk_free(ack);
1676 	}
1677 }
1678 
1679 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1680 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1681 					const struct sctp_association *asoc,
1682 					__be32 serial)
1683 {
1684 	struct sctp_chunk *ack;
1685 
1686 	/* Walk through the list of cached ASCONF-ACKs and find the
1687 	 * ack chunk whose serial number matches that of the request.
1688 	 */
1689 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1690 		if (sctp_chunk_pending(ack))
1691 			continue;
1692 		if (ack->subh.addip_hdr->serial == serial) {
1693 			sctp_chunk_hold(ack);
1694 			return ack;
1695 		}
1696 	}
1697 
1698 	return NULL;
1699 }
1700 
1701 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1702 {
1703 	/* Free any cached ASCONF_ACK chunk. */
1704 	sctp_assoc_free_asconf_acks(asoc);
1705 
1706 	/* Free the ASCONF queue. */
1707 	sctp_assoc_free_asconf_queue(asoc);
1708 
1709 	/* Free any cached ASCONF chunk. */
1710 	if (asoc->addip_last_asconf)
1711 		sctp_chunk_free(asoc->addip_last_asconf);
1712 }
1713