/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala	    <sri@us.ibm.com>
 *    Daisy Chang	    <daisyc@us.ibm.com>
 *    Ryan Layer	    <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* Keep track of the new idr low so that we don't re-use association id
 * numbers too fast.  It is protected by the idr spin lock and is in the
 * range of 1 - INT_MAX.
 */
static u32 idr_low = 1;


/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock.  */
	asoc->base.sk = (struct sock *)sk;
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = 0;
	asoc->base.malloced = 0;

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;

	/* Set these values from the socket values; a conversion from
	 * milliseconds to seconds/microseconds must also be done.
	 */
	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
					* 1000;
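
	/* Worked example (the 60000 ms figure is the usual default for
	 * the valid_cookie_life sysctl, per RFC 2960's 60 second
	 * Valid.Cookie.Life): 60000 ms yields tv_sec = 60, tv_usec = 0,
	 * while 60500 ms would yield tv_sec = 60, tv_usec = 500000.
	 */
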
	asoc->frag_point = 0;
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans  = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
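
	/* Illustrative example (values assumed, not defaults): with
	 * sp->autoclose at 300 s and the net.sctp.max_autoclose sysctl
	 * at 120 s, the autoclose timeout above becomes
	 * min(300, 120) * HZ = 120 * HZ jiffies.
	 */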

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Allocate storage for the ssnmap after the inbound and outbound
	 * streams have been negotiated during Init.
	 */
	asoc->ssnmap = NULL;

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;
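
	/* Worked example (the sk_rcvbuf value is assumed): with a
	 * 212992 byte receive buffer, rwnd starts at 106496; a buffer
	 * smaller than 2 * SCTP_DEFAULT_MINWINDOW (3000 bytes) is
	 * clamped up to the 1500 byte minimum window instead.
	 */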

	asoc->rwnd_over = 0;
	asoc->rwnd_press = 0;

	/* Use my own max window until I learn something better.  */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Set the sndbuf size for transmit.  */
	asoc->sndbuf_used = 0;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
	asoc->c.peer_vtag = 0;
	asoc->c.my_ttag   = 0;
	asoc->c.peer_ttag = 0;
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
	asoc->unack_data = 0;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses.  */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
	asoc->peer.transport_count = 0;

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_cnt = 0;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if it recognizes ASCONF
	 * as part of the INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and reverts to the old behavior.
	 */
	asoc->peer.asconf_capable = 0;
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;
	asoc->asconf_addr_del_pending = NULL;
	asoc->src_out_of_asoc_ok = 0;
	asoc->new_transport = NULL;

	/* Create an input queue.  */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue.  */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));

	asoc->need_ecne = 0;

	asoc->assoc_id = 0;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->autoclose = sp->autoclose;

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* SCTP_GET_ASSOC_STATS COUNTERS */
	memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->asoc_shared_key = NULL;

	asoc->default_hmac_id = 0;
	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					 const struct sock *sk,
					 sctp_scope_t scope,
					 gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = t_new(struct sctp_association, gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	asoc->base.malloced = 1;
	SCTP_DBG_OBJCNT_INC(assoc);
	SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}
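
/* A minimal lifecycle sketch (hypothetical caller; locking and error
 * handling elided, and `ep', `sk' and `scope' are assumed to exist):
 *
 *	struct sctp_association *asoc;
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *	...
 *	sctp_association_free(asoc);	// drops the initial reference
 */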

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!asoc->temp) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = 1;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we try to delete them all,
	 * although we could make a guess based on our state as to
	 * which ones are running.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (timer_pending(&asoc->timers[i]) &&
		    del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	if (asoc->base.malloced) {
		kfree(asoc);
		SCTP_DBG_OBJCNT_DEC(assoc);
	}
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association.  */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&peer->ipaddr),
				 ntohs(peer->ipaddr.v4.sin_port));

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport a SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association.  */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
				 " port: %d state:%d\n",
				 asoc,
				 addr,
				 port,
				 peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pathmtu);
	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
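
	/* Worked example: for a path MTU of 1500 bytes this gives
	 * cwnd = min(4 * 1500, max(2 * 1500, 4380))
	 *      = min(6000, 4380) = 4380 bytes.
	 */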

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc.  */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
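
/* Hedged usage sketch: per the UNKNOWN-state comment above, a
 * connect-time caller might add a user-supplied address roughly as
 * follows (`addr' is a hypothetical union sctp_addr filled in by the
 * caller):
 *
 *	peer = sctp_assoc_add_peer(asoc, &addr, GFP_KERNEL, SCTP_UNKNOWN);
 *	if (!peer)
 *		return -ENOMEM;
 */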

/* Delete a transport address from an association.  */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head	*pos;
	struct list_head	*temp;
	struct sctp_transport	*transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport	*temp;
	struct sctp_transport	*t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_transport *t = NULL;
	struct sctp_transport *first;
	struct sctp_transport *second;
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport.  */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = 1;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
	 * user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);
		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */

	/* Look for the two most recently used active transports.
	 *
	 * This code produces the wrong ordering whenever jiffies
	 * rolls over, but we still get usable transports, so we don't
	 * worry about it.
	 */
	first = NULL; second = NULL;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {

		if ((t->state == SCTP_INACTIVE) ||
		    (t->state == SCTP_UNCONFIRMED) ||
		    (t->state == SCTP_PF))
			continue;
		if (!first || t->last_time_heard > first->last_time_heard) {
			second = first;
			first = t;
		}
		if (!second || t->last_time_heard > second->last_time_heard)
			second = t;
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the
	 * primary path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source
	 * transport address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
	    first != asoc->peer.primary_path) {
		second = first;
		first = asoc->peer.primary_path;
	}

	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}

	/* Set the active and retran transports.  */
	asoc->peer.active_path = first;
	asoc->peer.retran_path = second;
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}
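
/* Serial arithmetic example: if next_tsn is 0xffffffff, the function
 * above returns 0xffffffff and the unsigned increment wraps next_tsn
 * to 0, matching the Section 1.6 rule quoted in its comment.
 */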

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	struct sctp_chunk *chunk;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	if (asoc->need_ecne)
		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
	else
		chunk = NULL;

	return chunk;
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.   Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 *
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			transports) {

		if (transport == active)
			break;
		list_for_each_entry(chunk, &transport->transmitted,
				transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk.  */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint.  */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock.  */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations.  */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new association
	 * and also move the association shared keys over.
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * Round-robin through the active transports, else round-robin
 * through the inactive transports as this is the next best thing
 * we can try.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	if (asoc->peer.transport_count == 1)
		return;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	next = NULL;

	while (1) {
		/* Skip the head. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* We have exhausted the list, but didn't find any
		 * other active transports.  If so, use the next
		 * transport.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}

		/* Try to find an active transport. */

		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (t->state != SCTP_UNCONFIRMED && !next)
				next = t;
		}
	}

	if (t)
		asoc->peer.retran_path = t;
	else
		t = asoc->peer.retran_path;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
				 " %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&t->ipaddr),
				 ntohs(t->ipaddr.v4.sin_port));
}

/* Choose the transport for sending retransmit packet.  */
struct sctp_transport *sctp_assoc_choose_alter_transport(
	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (!last_sent_to)
		return asoc->peer.active_path;
	else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);
		return asoc->peer.retran_path;
	}
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
				transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
			  __func__, asoc, asoc->pathmtu, asoc->frag_point);
}

/* Should we send a SACK to update our peer? */
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return 1;
		break;
	default:
		break;
	}
	return 0;
}
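
/* Worked example (values assumed): with sk_rcvbuf = 200000, an
 * rwnd_upd_shift of 4 and a path MTU of 1500, the update threshold is
 * max(200000 >> 4, 1500) = 12500 bytes, so a window update SACK is
 * warranted once rwnd exceeds a_rwnd by at least 12500.
 */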

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}
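
	/* Example of the bookkeeping above: with rwnd_over = 500 and
	 * len = 800, the first 500 bytes repay the overflow and the
	 * remaining 300 are credited to rwnd.
	 */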

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
			  "- %u\n", __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least
	 * the maximum of the association's PMTU and a fraction
	 * (sk_rcvbuf >> rwnd_upd_shift) of the receive buffer, as checked
	 * by sctp_peer_needs_update() above.  The algorithm used is
	 * similar to the one described in Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;
		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __func__,
				  asoc, asoc->rwnd, asoc->a_rwnd);
		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (timer_pending(timer) && del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}
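
	/* Example: rwnd = 1000, len = 1500 leaves rwnd = 0 with
	 * rwnd_over = 500; rwnd = 4000, len = 1500 with a full receive
	 * buffer (over == 1) parks the remaining 2500 bytes in
	 * rwnd_press and forces rwnd to 0.
	 */
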
	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
			  __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie.  */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	int assoc_id;
	int error = 0;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return error;
retry:
	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
		return -ENOMEM;

	spin_lock_bh(&sctp_assocs_id_lock);
	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
				    idr_low, &assoc_id);
	if (!error) {
		idr_low = assoc_id + 1;
		if (idr_low == INT_MAX)
			idr_low = 1;
	}
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	asoc->assoc_id = (sctp_assoc_t) assoc_id;
	return error;
}
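
/* Illustrative walk-through of the allocation above: with idr_low at
 * 1, the first association is assigned id 1 and idr_low advances to 2;
 * once idr_low reaches INT_MAX it is reset to 1, keeping ids in the
 * 1 - INT_MAX range without immediate reuse.
 */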

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
				htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}