xref: /openbmc/linux/net/sctp/transport.c (revision 6dfcd296)
1 /* SCTP kernel implementation
2  * Copyright (c) 1999-2000 Cisco, Inc.
3  * Copyright (c) 1999-2001 Motorola, Inc.
4  * Copyright (c) 2001-2003 International Business Machines Corp.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 La Monte H.P. Yarroll
7  *
8  * This file is part of the SCTP kernel implementation
9  *
10  * This module provides the abstraction for an SCTP transport representing
11  * a remote transport address.  For local transport addresses, we just use
12  * union sctp_addr.
13  *
14  * This SCTP implementation is free software;
15  * you can redistribute it and/or modify it under the terms of
16  * the GNU General Public License as published by
17  * the Free Software Foundation; either version 2, or (at your option)
18  * any later version.
19  *
20  * This SCTP implementation is distributed in the hope that it
21  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
22  *                 ************************
23  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
24  * See the GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with GNU CC; see the file COPYING.  If not, see
28  * <http://www.gnu.org/licenses/>.
29  *
30  * Please send any bug reports or fixes you make to the
31  * email address(es):
32  *    lksctp developers <linux-sctp@vger.kernel.org>
33  *
34  * Written or modified by:
35  *    La Monte H.P. Yarroll <piggy@acm.org>
36  *    Karl Knutson          <karl@athena.chicago.il.us>
37  *    Jon Grimm             <jgrimm@us.ibm.com>
38  *    Xingang Guo           <xingang.guo@intel.com>
39  *    Hui Huang             <hui.huang@nokia.com>
40  *    Sridhar Samudrala	    <sri@us.ibm.com>
41  *    Ardelle Fan	    <ardelle.fan@intel.com>
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/slab.h>
47 #include <linux/types.h>
48 #include <linux/random.h>
49 #include <net/sctp/sctp.h>
50 #include <net/sctp/sm.h>
51 
52 /* 1st Level Abstractions.  */
53 
54 /* Initialize a new transport from provided memory.  */
55 static struct sctp_transport *sctp_transport_init(struct net *net,
56 						  struct sctp_transport *peer,
57 						  const union sctp_addr *addr,
58 						  gfp_t gfp)
59 {
60 	/* Copy in the address.  */
61 	peer->ipaddr = *addr;
62 	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
63 	memset(&peer->saddr, 0, sizeof(union sctp_addr));
64 
65 	peer->sack_generation = 0;
66 
67 	/* From 6.3.1 RTO Calculation:
68 	 *
69 	 * C1) Until an RTT measurement has been made for a packet sent to the
70 	 * given destination transport address, set RTO to the protocol
71 	 * parameter 'RTO.Initial'.
72 	 */
73 	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
74 
75 	peer->last_time_heard = ktime_set(0, 0);
76 	peer->last_time_ecne_reduced = jiffies;
77 
78 	peer->param_flags = SPP_HB_DISABLE |
79 			    SPP_PMTUD_ENABLE |
80 			    SPP_SACKDELAY_ENABLE;
81 
82 	/* Initialize the default path max_retrans.  */
83 	peer->pathmaxrxt  = net->sctp.max_retrans_path;
84 	peer->pf_retrans  = net->sctp.pf_retrans;
85 
86 	INIT_LIST_HEAD(&peer->transmitted);
87 	INIT_LIST_HEAD(&peer->send_ready);
88 	INIT_LIST_HEAD(&peer->transports);
89 
90 	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
91 			(unsigned long)peer);
92 	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
93 			(unsigned long)peer);
94 	setup_timer(&peer->proto_unreach_timer,
95 		    sctp_generate_proto_unreach_event, (unsigned long)peer);
96 
97 	/* Initialize the 64-bit random nonce sent with heartbeat. */
98 	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
99 
100 	atomic_set(&peer->refcnt, 1);
101 
102 	return peer;
103 }
104 
105 /* Allocate and initialize a new transport.  */
106 struct sctp_transport *sctp_transport_new(struct net *net,
107 					  const union sctp_addr *addr,
108 					  gfp_t gfp)
109 {
110 	struct sctp_transport *transport;
111 
112 	transport = kzalloc(sizeof(*transport), gfp);
113 	if (!transport)
114 		goto fail;
115 
116 	if (!sctp_transport_init(net, transport, addr, gfp))
117 		goto fail_init;
118 
119 	SCTP_DBG_OBJCNT_INC(transport);
120 
121 	return transport;
122 
123 fail_init:
124 	kfree(transport);
125 
126 fail:
127 	return NULL;
128 }
129 
130 /* This transport is no longer needed.  Free up if possible, or
131  * delay until its last reference is dropped.
132  */
133 void sctp_transport_free(struct sctp_transport *transport)
134 {
135 	/* Try to delete the heartbeat timer.  */
136 	if (del_timer(&transport->hb_timer))
137 		sctp_transport_put(transport);
138 
139 	/* Delete the T3_rtx timer if it's active.
140 	 * There is no point in not doing this now and letting
141 	 * the structure hang around in memory since we know
142 	 * the transport is going away.
143 	 */
144 	if (del_timer(&transport->T3_rtx_timer))
145 		sctp_transport_put(transport);
146 
147 	/* Delete the ICMP proto unreachable timer if it's active. */
148 	if (del_timer(&transport->proto_unreach_timer))
149 		sctp_association_put(transport->asoc);
150 
151 	sctp_transport_put(transport);
152 }
153 
154 static void sctp_transport_destroy_rcu(struct rcu_head *head)
155 {
156 	struct sctp_transport *transport;
157 
158 	transport = container_of(head, struct sctp_transport, rcu);
159 
160 	dst_release(transport->dst);
161 	kfree(transport);
162 	SCTP_DBG_OBJCNT_DEC(transport);
163 }
164 
165 /* Destroy the transport data structure.
166  * Assumes there are no more users of this structure.
167  */
168 static void sctp_transport_destroy(struct sctp_transport *transport)
169 {
170 	if (unlikely(atomic_read(&transport->refcnt))) {
171 		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
172 		return;
173 	}
174 
175 	sctp_packet_free(&transport->packet);
176 
177 	if (transport->asoc)
178 		sctp_association_put(transport->asoc);
179 
180 	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
181 }
182 
183 /* Start T3_rtx timer if it is not already running and update the heartbeat
184  * timer.  This routine is called every time a DATA chunk is sent.
185  */
186 void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
187 {
188 	/* RFC 2960 6.3.2 Retransmission Timer Rules
189 	 *
190 	 * R1) Every time a DATA chunk is sent to any address (including a
191 	 * retransmission), if the T3-rtx timer of that address is not running,
192 	 * start it running so that it will expire after the RTO of that
193 	 * address.
194 	 */
195 
196 	if (!timer_pending(&transport->T3_rtx_timer))
197 		if (!mod_timer(&transport->T3_rtx_timer,
198 			       jiffies + transport->rto))
199 			sctp_transport_hold(transport);
200 }
201 
202 void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
203 {
204 	unsigned long expires;
205 
206 	/* When a data chunk is sent, reset the heartbeat interval.  */
207 	expires = jiffies + sctp_transport_timeout(transport);
208 	if (time_before(transport->hb_timer.expires, expires) &&
209 	    !mod_timer(&transport->hb_timer,
210 		       expires + prandom_u32_max(transport->rto)))
211 		sctp_transport_hold(transport);
212 }
213 
214 /* This transport has been assigned to an association.
215  * Initialize fields from the association or from the sock itself.
216  * Take a reference on the association.
217  */
218 void sctp_transport_set_owner(struct sctp_transport *transport,
219 			      struct sctp_association *asoc)
220 {
221 	transport->asoc = asoc;
222 	sctp_association_hold(asoc);
223 }
224 
225 /* Initialize the pmtu of a transport. */
226 void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
227 {
228 	/* If we don't have a fresh route, look one up */
229 	if (!transport->dst || transport->dst->obsolete) {
230 		dst_release(transport->dst);
231 		transport->af_specific->get_dst(transport, &transport->saddr,
232 						&transport->fl, sk);
233 	}
234 
235 	if (transport->dst) {
236 		transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
237 	} else
238 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
239 }
240 
241 void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
242 {
243 	struct dst_entry *dst;
244 
245 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
246 		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
247 			__func__, pmtu,
248 			SCTP_DEFAULT_MINSEGMENT);
249 		/* Use default minimum segment size and disable
250 		 * pmtu discovery on this transport.
251 		 */
252 		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
253 	} else {
254 		t->pathmtu = pmtu;
255 	}
256 
257 	dst = sctp_transport_dst_check(t);
258 	if (!dst)
259 		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
260 
261 	if (dst) {
262 		dst->ops->update_pmtu(dst, sk, NULL, pmtu);
263 
264 		dst = sctp_transport_dst_check(t);
265 		if (!dst)
266 			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
267 	}
268 }
269 
270 /* Caches the dst entry and source address for a transport's destination
271  * address.
272  */
273 void sctp_transport_route(struct sctp_transport *transport,
274 			  union sctp_addr *saddr, struct sctp_sock *opt)
275 {
276 	struct sctp_association *asoc = transport->asoc;
277 	struct sctp_af *af = transport->af_specific;
278 
279 	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
280 
281 	if (saddr)
282 		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
283 	else
284 		af->get_saddr(opt, transport, &transport->fl);
285 
286 	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
287 		return;
288 	}
289 	if (transport->dst) {
290 		transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
291 
292 		/* Set sk->sk_rcv_saddr if the transport is the association's
293 		 * active path, so that getsockname() returns this source address.
294 		 */
295 		if (asoc && (!asoc->peer.primary_path ||
296 				(transport == asoc->peer.active_path)))
297 			opt->pf->to_sk_saddr(&transport->saddr,
298 					     asoc->base.sk);
299 	} else
300 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
301 }
302 
303 /* Hold a reference to a transport.  */
304 int sctp_transport_hold(struct sctp_transport *transport)
305 {
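	/* atomic_add_unless(..., 1, 0) takes the reference only if the
	 * refcount has not already dropped to zero, so callers must check
	 * the return value before using the transport.
	 */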
306 	return atomic_add_unless(&transport->refcnt, 1, 0);
307 }
308 
309 /* Release a reference to a transport and clean up
310  * if there are no more references.
311  */
312 void sctp_transport_put(struct sctp_transport *transport)
313 {
314 	if (atomic_dec_and_test(&transport->refcnt))
315 		sctp_transport_destroy(transport);
316 }
317 
318 /* Update transport's RTO based on the newly calculated RTT. */
319 void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
320 {
321 	if (unlikely(!tp->rto_pending))
322 		/* We should not be doing any RTO updates unless rto_pending is set.  */
323 		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);
324 
325 	if (tp->rttvar || tp->srtt) {
326 		struct net *net = sock_net(tp->asoc->base.sk);
327 		/* 6.3.1 C3) When a new RTT measurement R' is made, set
328 		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
329 		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
330 		 */
331 
332 		/* Note:  The above algorithm has been rewritten to
333 		 * express rto_beta and rto_alpha as inverse powers
334 		 * of two.
335 		 * For example, assuming the default value of RTO.Alpha of
336 		 * 1/8, rto_alpha would be expressed as 3.
337 		 */
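		/* Illustrative walk-through (hypothetical values, not from
		 * the RFC): with SRTT = 800, RTTVAR = 200, a new measurement
		 * R' = 400, rto_alpha = 3 (1/8) and rto_beta = 2 (1/4), the
		 * shift form below gives
		 *   RTTVAR = 200 - 200/4 + |800 - 400|/4 = 250
		 *   SRTT   = 800 - 800/8 + 400/8        = 750
		 */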
338 		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
339 			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
340 		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
341 			+ (rtt >> net->sctp.rto_alpha);
342 	} else {
343 		/* 6.3.1 C2) When the first RTT measurement R is made, set
344 		 * SRTT <- R, RTTVAR <- R/2.
345 		 */
346 		tp->srtt = rtt;
347 		tp->rttvar = rtt >> 1;
348 	}
349 
350 	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
351 	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
352 	 */
353 	if (tp->rttvar == 0)
354 		tp->rttvar = SCTP_CLOCK_GRANULARITY;
355 
356 	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
357 	tp->rto = tp->srtt + (tp->rttvar << 2);
358 
359 	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
360 	 * seconds then it is rounded up to RTO.Min seconds.
361 	 */
362 	if (tp->rto < tp->asoc->rto_min)
363 		tp->rto = tp->asoc->rto_min;
364 
365 	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
366 	 * at least RTO.max seconds.
367 	 */
368 	if (tp->rto > tp->asoc->rto_max)
369 		tp->rto = tp->asoc->rto_max;
370 
371 	sctp_max_rto(tp->asoc, tp);
372 	tp->rtt = rtt;
373 
374 	/* Reset rto_pending so that a new RTT measurement is started when a
375 	 * new data chunk is sent.
376 	 */
377 	tp->rto_pending = 0;
378 
379 	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
380 		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
381 }
382 
383 /* This routine updates the transport's cwnd and partial_bytes_acked
384  * parameters based on the bytes acked in the received SACK.
385  */
386 void sctp_transport_raise_cwnd(struct sctp_transport *transport,
387 			       __u32 sack_ctsn, __u32 bytes_acked)
388 {
389 	struct sctp_association *asoc = transport->asoc;
390 	__u32 cwnd, ssthresh, flight_size, pba, pmtu;
391 
392 	cwnd = transport->cwnd;
393 	flight_size = transport->flight_size;
394 
395 	/* See if we need to exit Fast Recovery first */
396 	if (asoc->fast_recovery &&
397 	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
398 		asoc->fast_recovery = 0;
399 
400 	/* The appropriate cwnd increase algorithm is performed if, and only
401 	 * if, the cumulative TSN would advance and the congestion window is
402 	 * being fully utilized.
403 	 */
404 	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
405 	    (flight_size < cwnd))
406 		return;
407 
408 	ssthresh = transport->ssthresh;
409 	pba = transport->partial_bytes_acked;
410 	pmtu = transport->asoc->pathmtu;
411 
412 	if (cwnd <= ssthresh) {
413 		/* RFC 4960 7.2.1
414 		 * o  When cwnd is less than or equal to ssthresh, an SCTP
415 		 *    endpoint MUST use the slow-start algorithm to increase
416 		 *    cwnd only if the current congestion window is being fully
417 		 *    utilized, an incoming SACK advances the Cumulative TSN
418 		 *    Ack Point, and the data sender is not in Fast Recovery.
419 		 *    Only when these three conditions are met can the cwnd be
420 		 *    increased; otherwise, the cwnd MUST not be increased.
421 		 *    If these conditions are met, then cwnd MUST be increased
422 		 *    by, at most, the lesser of 1) the total size of the
423 		 *    previously outstanding DATA chunk(s) acknowledged, and
424 		 *    2) the destination's path MTU.  This upper bound protects
425 		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
426 		 */
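		/* In other words (illustrative numbers): with a path MTU of
		 * 1500 bytes, a SACK newly acknowledging 4000 bytes grows
		 * cwnd by only 1500, while one acknowledging 1000 bytes
		 * grows it by 1000.
		 */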
427 		if (asoc->fast_recovery)
428 			return;
429 
430 		if (bytes_acked > pmtu)
431 			cwnd += pmtu;
432 		else
433 			cwnd += bytes_acked;
434 
435 		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
436 			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
437 			 __func__, transport, bytes_acked, cwnd, ssthresh,
438 			 flight_size, pba);
439 	} else {
440 		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
441 		 * upon each SACK arrival that advances the Cumulative TSN Ack
442 		 * Point, increase partial_bytes_acked by the total number of
443 		 * bytes of all new chunks acknowledged in that SACK including
444 		 * chunks acknowledged by the new Cumulative TSN Ack and by
445 		 * Gap Ack Blocks.
446 		 *
447 		 * When partial_bytes_acked is equal to or greater than cwnd
448 		 * and before the arrival of the SACK the sender had cwnd or
449 		 * more bytes of data outstanding (i.e., before arrival of the
450 		 * SACK, flightsize was greater than or equal to cwnd),
451 		 * increase cwnd by MTU, and reset partial_bytes_acked to
452 		 * (partial_bytes_acked - cwnd).
453 		 */
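		/* Worked example (hypothetical numbers): cwnd = 12000,
		 * pmtu = 1500, pba = 11000, bytes_acked = 2000.  pba becomes
		 * 13000 >= cwnd, so cwnd grows to 13500 and pba resets to 0
		 * because it does not exceed the new cwnd.
		 */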
454 		pba += bytes_acked;
455 		if (pba >= cwnd) {
456 			cwnd += pmtu;
457 			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
458 		}
459 
460 		pr_debug("%s: congestion avoidance: transport:%p, "
461 			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
462 			 "flight_size:%d, pba:%d\n", __func__,
463 			 transport, bytes_acked, cwnd, ssthresh,
464 			 flight_size, pba);
465 	}
466 
467 	transport->cwnd = cwnd;
468 	transport->partial_bytes_acked = pba;
469 }
470 
471 /* This routine is used to lower the transport's cwnd when congestion is
472  * detected.
473  */
474 void sctp_transport_lower_cwnd(struct sctp_transport *transport,
475 			       sctp_lower_cwnd_t reason)
476 {
477 	struct sctp_association *asoc = transport->asoc;
478 
479 	switch (reason) {
480 	case SCTP_LOWER_CWND_T3_RTX:
481 		/* RFC 2960 Section 7.2.3, sctpimpguide
482 		 * When the T3-rtx timer expires on an address, SCTP should
483 		 * perform slow start by:
484 		 *      ssthresh = max(cwnd/2, 4*MTU)
485 		 *      cwnd = 1*MTU
486 		 *      partial_bytes_acked = 0
487 		 */
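		/* Note the 4*MTU floor (numbers illustrative): with
		 * cwnd = 6*MTU the new ssthresh is max(3*MTU, 4*MTU) = 4*MTU,
		 * not cwnd/2.
		 */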
488 		transport->ssthresh = max(transport->cwnd/2,
489 					  4*asoc->pathmtu);
490 		transport->cwnd = asoc->pathmtu;
491 
492 		/* T3-rtx also clears fast recovery */
493 		asoc->fast_recovery = 0;
494 		break;
495 
496 	case SCTP_LOWER_CWND_FAST_RTX:
497 		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
498 		 * destination address(es) to which the missing DATA chunks
499 		 * were last sent, according to the formula described in
500 		 * Section 7.2.3.
501 		 *
502 		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
503 		 * losses from SACK (see Section 7.2.4), An endpoint
504 		 * should do the following:
505 		 *      ssthresh = max(cwnd/2, 4*MTU)
506 		 *      cwnd = ssthresh
507 		 *      partial_bytes_acked = 0
508 		 */
509 		if (asoc->fast_recovery)
510 			return;
511 
512 		/* Mark Fast recovery */
513 		asoc->fast_recovery = 1;
514 		asoc->fast_recovery_exit = asoc->next_tsn - 1;
515 
516 		transport->ssthresh = max(transport->cwnd/2,
517 					  4*asoc->pathmtu);
518 		transport->cwnd = transport->ssthresh;
519 		break;
520 
521 	case SCTP_LOWER_CWND_ECNE:
522 		/* RFC 2481 Section 6.1.2.
523 		 * If the sender receives an ECN-Echo ACK packet
524 		 * then the sender knows that congestion was encountered in the
525 		 * network on the path from the sender to the receiver. The
526 		 * indication of congestion should be treated just as a
527 		 * congestion loss in non-ECN Capable TCP. That is, the TCP
528 		 * source halves the congestion window "cwnd" and reduces the
529 		 * slow start threshold "ssthresh".
530 		 * A critical condition is that TCP does not react to
531 		 * congestion indications more than once every window of
532 		 * data (or more loosely more than once every round-trip time).
533 		 */
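		/* The check below approximates "once every window of data"
		 * with the measured RTT: at most one reduction per RTT.
		 */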
534 		if (time_after(jiffies, transport->last_time_ecne_reduced +
535 					transport->rtt)) {
536 			transport->ssthresh = max(transport->cwnd/2,
537 						  4*asoc->pathmtu);
538 			transport->cwnd = transport->ssthresh;
539 			transport->last_time_ecne_reduced = jiffies;
540 		}
541 		break;
542 
543 	case SCTP_LOWER_CWND_INACTIVE:
544 		/* RFC 2960 Section 7.2.1, sctpimpguide
545 		 * When the endpoint does not transmit data on a given
546 		 * transport address, the cwnd of the transport address
547 		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
548 		 * NOTE: Although the draft recommends that this check needs
549 	 * to be done every RTO interval, we do it every heartbeat
550 		 * interval.
551 		 */
552 		transport->cwnd = max(transport->cwnd/2,
553 					 4*asoc->pathmtu);
554 		break;
555 	}
556 
557 	transport->partial_bytes_acked = 0;
558 
559 	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
560 		 __func__, transport, reason, transport->cwnd,
561 		 transport->ssthresh);
562 }
563 
564 /* Apply Max.Burst limit to the congestion window:
565  * sctpimpguide-05 2.14.2
566  * D) When the time comes for the sender to
567  * transmit new DATA chunks, the protocol parameter Max.Burst MUST
568  * first be applied to limit how many new DATA chunks may be sent.
569  * The limit is applied by adjusting cwnd as follows:
570  * 	if ((flightsize + Max.Burst * MTU) < cwnd)
571  * 		cwnd = flightsize + Max.Burst * MTU
572  */
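/* For instance (hypothetical values): with flight_size = 20000 bytes,
 * max_burst = 4 and a path MTU of 1500, the limit is 20000 + 6000 = 26000;
 * a cwnd of 65535 would be clamped to 26000 and the old value remembered in
 * burst_limited so that sctp_transport_burst_reset() can restore it.
 */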
573 
574 void sctp_transport_burst_limited(struct sctp_transport *t)
575 {
576 	struct sctp_association *asoc = t->asoc;
577 	u32 old_cwnd = t->cwnd;
578 	u32 max_burst_bytes;
579 
580 	if (t->burst_limited || asoc->max_burst == 0)
581 		return;
582 
583 	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
584 	if (max_burst_bytes < old_cwnd) {
585 		t->cwnd = max_burst_bytes;
586 		t->burst_limited = old_cwnd;
587 	}
588 }
589 
590 /* Restore the old congestion window (cwnd) after the burst has had its
591  * desired effect.
592  */
593 void sctp_transport_burst_reset(struct sctp_transport *t)
594 {
595 	if (t->burst_limited) {
596 		t->cwnd = t->burst_limited;
597 		t->burst_limited = 0;
598 	}
599 }
600 
601 /* What is the next timeout value for this transport? */
602 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
603 {
604 	/* RTO/2 of slack; the jitter in sctp_transport_reset_hb_timer() makes it RTO +/- 50% of RTO. */
605 	unsigned long timeout = trans->rto >> 1;
606 
607 	if (trans->state != SCTP_UNCONFIRMED &&
608 	    trans->state != SCTP_PF)
609 		timeout += trans->hbinterval;
610 
611 	return timeout;
612 }
613 
614 /* Reset transport variables to their initial values */
615 void sctp_transport_reset(struct sctp_transport *t)
616 {
617 	struct sctp_association *asoc = t->asoc;
618 
619 	/* RFC 2960 (bis), Section 5.2.4
620 	 * All the congestion control parameters (e.g., cwnd, ssthresh)
621 	 * related to this peer MUST be reset to their initial values
622 	 * (see Section 6.2.1)
623 	 */
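	/* e.g. (illustrative): with a path MTU of 1500 this gives
	 * min(6000, max(3000, 4380)) = 4380 bytes for the initial cwnd.
	 */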
624 	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
625 	t->burst_limited = 0;
626 	t->ssthresh = asoc->peer.i.a_rwnd;
627 	t->rto = asoc->rto_initial;
628 	sctp_max_rto(asoc, t);
629 	t->rtt = 0;
630 	t->srtt = 0;
631 	t->rttvar = 0;
632 
633 	/* Reset these additional variables so that we have a clean
634 	 * slate.
635 	 */
636 	t->partial_bytes_acked = 0;
637 	t->flight_size = 0;
638 	t->error_count = 0;
639 	t->rto_pending = 0;
640 	t->hb_sent = 0;
641 
642 	/* Initialize the state information for SFR-CACC */
643 	t->cacc.changeover_active = 0;
644 	t->cacc.cycling_changeover = 0;
645 	t->cacc.next_tsn_at_change = 0;
646 	t->cacc.cacc_saw_newack = 0;
647 }
648 
649 /* Schedule retransmission on the given transport */
650 void sctp_transport_immediate_rtx(struct sctp_transport *t)
651 {
652 	/* Stop pending T3_rtx_timer */
653 	if (del_timer(&t->T3_rtx_timer))
654 		sctp_transport_put(t);
655 
656 	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
657 	if (!timer_pending(&t->T3_rtx_timer)) {
658 		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
659 			sctp_transport_hold(t);
660 	}
661 }
662