// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = 0;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
	timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
	timer_setup(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, 0);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	refcount_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
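/* A note on lifetime, summarizing the code in this file: a transport is
 * born with one reference (set in sctp_transport_init() above).  Each
 * armed timer holds an additional reference, taken in the
 * sctp_transport_reset_*() helpers below and dropped either when the
 * timer is deleted or by the timer handler once it has fired.  When the
 * last reference is dropped, sctp_transport_put() destroys the
 * structure via call_rcu().
 */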
/* This transport is no longer needed.  Free up if possible, or
 * delay until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.  There is no point
	 * in leaving this timer running and letting the structure hang
	 * around in memory, since we know the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->reconf_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->probe_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_transport_put(transport);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(refcount_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
	unsigned long expires;

	/* When a data chunk is sent, reset the heartbeat interval.  */
	expires = jiffies + sctp_transport_timeout(transport);
	if ((time_before(transport->hb_timer.expires, expires) ||
	     !timer_pending(&transport->hb_timer)) &&
	    !mod_timer(&transport->hb_timer,
		       expires + prandom_u32_max(transport->rto)))
		sctp_transport_hold(transport);
}

void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
{
	if (!timer_pending(&transport->reconf_timer))
		if (!mod_timer(&transport->reconf_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
	if (timer_pending(&transport->probe_timer))
		return;
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval))
		sctp_transport_hold(transport);
}
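/* A note on the re-arm pattern above: mod_timer() returns 0 when the
 * timer was not already pending, i.e. when the call actually armed it.
 * Only in that case is a new reference taken with sctp_transport_hold(),
 * so each armed timer holds exactly one reference no matter how many
 * times it is re-armed, matching the single put per deleted timer in
 * sctp_transport_free().
 */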
/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Take a reference on the association for the transport's lifetime.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		sctp_transport_dst_release(transport);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->param_flags & SPP_PMTUD_DISABLE) {
		struct sctp_association *asoc = transport->asoc;

		if (!transport->pathmtu && asoc && asoc->pathmtu)
			transport->pathmtu = asoc->pathmtu;
		if (transport->pathmtu)
			return;
	}

	if (transport->dst)
		transport->pathmtu = sctp_dst_mtu(transport->dst);
	else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;

	sctp_transport_pl_update(transport);
}
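/* The sctp_transport_pl_*() helpers below implement the PLPMTUD
 * (Packetization Layer Path MTU Discovery, RFC 8899) probe state
 * machine.  pl.pmtu tracks the verified payload size; the transport's
 * pathmtu is pl.pmtu plus the lower-layer headers added back by
 * sctp_transport_pl_hlen().  Roughly: BASE (re)confirms SCTP_BASE_PLPMTU,
 * SEARCH grows pl.probe_size toward SCTP_MAX_PLPMTU, COMPLETE
 * periodically re-confirms the current pmtu, and ERROR runs at
 * SCTP_MIN_PLPMTU after BASE confirmation has failed.
 */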
bool sctp_transport_pl_send(struct sctp_transport *t)
{
	if (t->pl.probe_count < SCTP_MAX_PROBES)
		goto out;

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_MIN_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_high = 0;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		} else { /* Normal probe failure. */
			t->pl.probe_high = t->pl.probe_size;
			t->pl.probe_size = t->pl.pmtu;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	}

out:
	if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
	    !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
		t->pl.raise_count++;
		return false;
	}

	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.probe_count++;
	return true;
}

bool sctp_transport_pl_recv(struct sctp_transport *t)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.pmtu = t->pl.probe_size;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_ERROR) {
		t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */

		t->pl.pmtu = t->pl.probe_size;
		t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		sctp_assoc_sync_pmtu(t->asoc);
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (!t->pl.probe_high) {
			t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
					       SCTP_MAX_PLPMTU);
			return false;
		}
		t->pl.probe_size += SCTP_PL_MIN_STEP;
		if (t->pl.probe_size >= t->pl.probe_high) {
			t->pl.probe_high = 0;
			t->pl.raise_count = 0;
			t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */

			t->pl.probe_size = t->pl.pmtu;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
		/* Raise probe_size again after 30 * interval in Search Complete */
		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
		t->pl.probe_size += SCTP_PL_MIN_STEP;
	}

	return t->pl.state == SCTP_PL_COMPLETE;
}
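/* Why "pl.pmtu == pl.probe_size" signals a black hole in pl_send()
 * above: the machine only probes at the confirmed pmtu itself, either
 * to re-confirm it (COMPLETE) or after a larger probe just failed
 * (SEARCH sets probe_size back to pmtu on a normal failure).  If even
 * that size then fails SCTP_MAX_PROBES times, the path no longer
 * carries the MTU we had verified, so we drop back to BASE and start
 * over.
 */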
static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);

	if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
		return false;

	if (t->pl.state == SCTP_PL_BASE) {
		if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_MIN_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		} else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
			t->pl.probe_size = pmtu;
			t->pl.probe_count = 0;

			return false;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		}
	}

	return true;
}

bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct sock *sk = t->asoc->base.sk;
	struct dst_entry *dst;
	bool change = true;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
				    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment instead */
		pmtu = SCTP_DEFAULT_MINSEGMENT;
	}
	pmtu = SCTP_TRUNC4(pmtu);

	if (sctp_transport_pl_enabled(t))
		return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));

	dst = sctp_transport_dst_check(t);
	if (dst) {
		struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
		union sctp_addr addr;

		pf->af->from_sk(&addr, sk);
		pf->to_sk_daddr(&t->ipaddr, sk);
		dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
		pf->to_sk_daddr(&addr, sk);

		dst = sctp_transport_dst_check(t);
	}

	if (!dst) {
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
		dst = t->dst;
	}

	if (dst) {
		/* Re-fetch, as under layers may have a higher minimum size */
		pmtu = sctp_dst_mtu(dst);
		change = t->pathmtu != pmtu;
	}
	t->pathmtu = pmtu;

	return change;
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	sctp_transport_dst_release(transport);
	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	sctp_transport_pmtu(transport, sctp_opt2sk(opt));

	/* Initialize sk->sk_rcv_saddr, if the transport is the
	 * association's active path for getsockname().
	 */
	if (transport->dst && asoc &&
	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
}

/* Hold a reference to a transport.  */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return refcount_inc_not_zero(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (refcount_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}
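/* A worked example of the RTO update below, using the default
 * rto_alpha = 3 (RTO.Alpha = 1/8) and rto_beta = 2 (RTO.Beta = 1/4),
 * with srtt = 800, rttvar = 200 and a new measurement rtt = 1000
 * (all in jiffies; only consistency of units matters):
 *
 *	rttvar = 200 - (200 >> 2) + (|800 - 1000| >> 2) = 200
 *	srtt   = 800 - (800 >> 3) + (1000 >> 3)         = 825
 *	rto    = 825 + 4 * 200 = 1625, then clamped to
 *		 [asoc->rto_min, asoc->rto_max]
 */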
/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = tp->asoc->base.net;
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note: The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}
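/* A concrete slow-start trace for the routine below: with pathmtu 1500,
 * a freshly reset transport starts at cwnd = min(4 * 1500,
 * max(2 * 1500, 4380)) = 4380 (see sctp_transport_reset()).  A SACK
 * acking 3000 new bytes while flight_size >= cwnd raises cwnd by
 * min(3000, 1500) = 1500, i.e. by at most one MTU per SACK, which is
 * the [SAVAGE99] ACK-splitting bound quoted in the comment below.
 */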
/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST NOT be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		/* The appropriate cwnd increase algorithm is performed
		 * if, and only if, the congestion window is being fully
		 * utilized.  Note that RFC 4960 Errata 3.22 removed the
		 * other condition on ctsn moving.
		 */
		if (flight_size < cwnd)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival, increase partial_bytes_acked
		 * by the total number of bytes of all new chunks
		 * acknowledged in that SACK including chunks
		 * acknowledged by the new Cumulative TSN Ack and by Gap
		 * Ack Blocks.  (updated by RFC 4960 Errata 3.22)
		 *
		 * When partial_bytes_acked is greater than cwnd and
		 * before the arrival of the SACK the sender had less
		 * bytes of data outstanding than cwnd (i.e., before
		 * arrival of the SACK, flightsize was less than cwnd),
		 * reset partial_bytes_acked to cwnd.  (RFC 4960 Errata
		 * 3.26)
		 *
		 * When partial_bytes_acked is equal to or greater than
		 * cwnd and before the arrival of the SACK the sender
		 * had cwnd or more bytes of data outstanding (i.e.,
		 * before arrival of the SACK, flightsize was greater
		 * than or equal to cwnd), partial_bytes_acked is reset
		 * to (partial_bytes_acked - cwnd).  Next, cwnd is
		 * increased by MTU.  (RFC 4960 Errata 3.12)
		 */
		pba += bytes_acked;
		if (pba > cwnd && flight_size < cwnd)
			pba = cwnd;
		if (pba >= cwnd && flight_size >= cwnd) {
			pba = pba - cwnd;
			cwnd += pmtu;
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
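/* Congestion-avoidance example for the pba logic above: with
 * cwnd = 12000, pmtu = 1500 and flight_size >= cwnd, SACKs must
 * accumulate partial_bytes_acked >= 12000 before cwnd grows by a single
 * MTU to 13500 (and pba is reduced by the old cwnd), i.e. roughly one
 * MTU of growth per congestion window of acked data.
 */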
/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       enum sctp_lower_cwnd reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
				      4*asoc->pathmtu);
		/* RFC 4960 Errata 3.27.2: also adjust ssthresh */
		transport->ssthresh = transport->cwnd;
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */
void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}

/* Restore the old cwnd congestion window, after the burst had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}
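/* Example of the burst limit above: with the protocol default
 * max_burst = 4, pathmtu = 1500, flight_size = 1000 and cwnd = 20000,
 * cwnd is temporarily clamped to 1000 + 4 * 1500 = 7000 and the old
 * value is parked in t->burst_limited, to be restored by
 * sctp_transport_burst_reset() once the burst has gone out.
 */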
/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = trans->rto >> 1;

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return max_t(unsigned long, timeout, HZ / 5);
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean slate. */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}

/* Drop dst */
void sctp_transport_dst_release(struct sctp_transport *t)
{
	dst_release(t->dst);
	t->dst = NULL;
	t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
	t->dst_pending_confirm = 1;
}