/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(sctp_rto_initial);
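	/* For illustration: with the RFC 4960 default of RTO.Initial =
	 * 3 seconds, sctp_rto_initial is 3000 ms, so on a HZ=1000 kernel
	 * the transport starts out with an rto of 3000 jiffies.
	 */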

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = sctp_max_retrans_path;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);
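	/* This initial reference belongs to whoever created the transport;
	 * it is dropped by the final sctp_transport_put() in
	 * sctp_transport_free().
	 */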

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = t_new(struct sctp_transport, gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(transport, addr, gfp))
		goto fail_init;

	transport->malloced = 1;
	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free it immediately if possible,
 * or delay the destruction until the last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

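	/* A pending timer holds a reference: on the transport for the
	 * heartbeat and T3-rtx timers, and on the association for the
	 * proto-unreach timer.  Deleting a timer that was still pending
	 * must therefore drop the matching reference below.
	 */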
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting the
	 * structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (timer_pending(&transport->T3_rtx_timer) &&
	    del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (timer_pending(&transport->proto_unreach_timer) &&
	    del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	SCTP_ASSERT(transport->dead, "Transport is not dead", return);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	sctp_packet_free(&transport->packet);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not
	 * running, start it running so that it will expire after the RTO
	 * of that address.
	 */
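	/* mod_timer() returns 0 when the timer was not already pending; in
	 * that case the timer takes a new reference on the transport via
	 * sctp_transport_hold(), keeping it alive until the timer fires or
	 * is deleted.
	 */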
	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport)
{
	struct dst_entry *dst;

	dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL);

	if (dst) {
		transport->pathmtu = dst_mtu(dst);
		dst_release(dst);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* This is a complete rip-off from __sk_dst_check();
 * the cookie is always 0 since this is how it's used in the
 * pmtu code.
 */
static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
	struct dst_entry *dst = t->dst;

	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
		dst_release(t->dst);
		t->dst = NULL;
		return NULL;
	}

	return dst;
}

void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		printk(KERN_WARNING "%s: Reported pmtu %d too low, "
		       "using default minimum of %d\n",
		       __func__, pmtu,
		       SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->update_pmtu(dst, pmtu);
}
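
/* A typical caller of sctp_transport_update_pmtu() is the handler for an
 * incoming ICMP "fragmentation needed" error, which passes along the
 * next-hop MTU reported by the network.
 */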

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;
	union sctp_addr *daddr = &transport->ipaddr;
	struct dst_entry *dst;

	dst = af->get_dst(asoc, daddr, saddr);

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);

	transport->dst = dst;
	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu)
		return;

	if (dst) {
		transport->pathmtu = dst_mtu(dst);

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
				(transport == asoc->peer.active_path)))
			opt->pf->af->to_sk_saddr(&transport->saddr,
						 asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
	atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}
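
/* Every sctp_transport_hold() must be balanced by an sctp_transport_put().
 * For example, each timer armed in sctp_transport_reset_timers() takes a
 * hold, and the matching put is issued either by the expired timer's
 * handler or by the del_timer() calls in sctp_transport_free().
 */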

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	/* Check for valid transport.  */
	SCTP_ASSERT(tp, "NULL transport", return);

	/* We should not be doing any RTO updates unless rto_pending is set.  */
	SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);

	if (tp->rttvar || tp->srtt) {
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
			+ ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
			+ (rtt >> sctp_rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
			  "rttvar: %d, rto: %ld\n", __func__,
			  tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}
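
/* Worked example, assuming the default sysctl values rto_alpha = 3
 * (RTO.Alpha = 1/8) and rto_beta = 2 (RTO.Beta = 1/4).  A first
 * measurement R = 100 gives SRTT = 100, RTTVAR = 50, and
 * RTO = 100 + 4 * 50 = 300.  A second measurement R' = 200 then gives,
 * in integer arithmetic:
 *	RTTVAR = 50 - (50 >> 2) + (100 >> 2)   = 50 - 12 + 25  = 63
 *	SRTT   = 100 - (100 >> 3) + (200 >> 3) = 100 - 12 + 25 = 113
 *	RTO    = 113 + (63 << 2)               = 365
 * before the RTO.Min/RTO.Max clamping above is applied.
 */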

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if the cumulative TSN would advance and the congestion window is
	 * being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;
		SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, "
				  "bytes_acked: %d, cwnd: %d, ssthresh: %d, "
				  "flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}
		SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: "
				  "transport: %p, bytes_acked: %d, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
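
/* Worked example with pathmtu = 1500: in slow start (say cwnd = 4500 and
 * ssthresh = 65535), a SACK acknowledging 3000 in-flight bytes raises
 * cwnd by min(3000, 1500) = 1500, to 6000.  In congestion avoidance
 * (cwnd > ssthresh), the same SACK merely adds 3000 to
 * partial_bytes_acked; cwnd grows by a single MTU only once pba reaches
 * the full window.
 */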

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;
	SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
			  "%d ssthresh: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh);
}
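
/* Worked example with pathmtu = 1500 and cwnd = 12000: a T3-rtx expiry
 * sets ssthresh = max(12000/2, 4*1500) = 6000 and collapses cwnd to a
 * single MTU (1500), while a fast-retransmit loss keeps cwnd at the new
 * ssthresh of 6000.  Either way partial_bytes_acked is cleared.
 */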

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */
void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
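
/* Worked example with pathmtu = 1500, max_burst = 4, flight_size = 3000
 * and cwnd = 20000: max_burst_bytes = 3000 + 4 * 1500 = 9000, so cwnd is
 * temporarily clamped to 9000 and the old value (20000) is parked in
 * burst_limited until sctp_transport_burst_reset() restores it.
 */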

/* Restore the old cwnd congestion window, after the burst has had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;

	timeout = t->rto + sctp_jitter(t->rto);
	if (t->state != SCTP_UNCONFIRMED)
		timeout += t->hbinterval;
	timeout += jiffies;
	return timeout;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
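	/* E.g., with pathmtu = 1500 this yields
	 * min(6000, max(3000, 4380)) = 4380 bytes, the classic initial
	 * window from RFC 4960 Section 7.2.1.
	 */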
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}