1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* SCTP kernel implementation 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 International Business Machines Corp. 6 * Copyright (c) 2001 Intel Corp. 7 * Copyright (c) 2001 La Monte H.P. Yarroll 8 * 9 * This file is part of the SCTP kernel implementation 10 * 11 * This module provides the abstraction for an SCTP transport representing 12 * a remote transport address. For local transport addresses, we just use 13 * union sctp_addr. 14 * 15 * Please send any bug reports or fixes you make to the 16 * email address(es): 17 * lksctp developers <linux-sctp@vger.kernel.org> 18 * 19 * Written or modified by: 20 * La Monte H.P. Yarroll <piggy@acm.org> 21 * Karl Knutson <karl@athena.chicago.il.us> 22 * Jon Grimm <jgrimm@us.ibm.com> 23 * Xingang Guo <xingang.guo@intel.com> 24 * Hui Huang <hui.huang@nokia.com> 25 * Sridhar Samudrala <sri@us.ibm.com> 26 * Ardelle Fan <ardelle.fan@intel.com> 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/slab.h> 32 #include <linux/types.h> 33 #include <linux/random.h> 34 #include <net/sctp/sctp.h> 35 #include <net/sctp/sm.h> 36 37 /* 1st Level Abstractions. */ 38 39 /* Initialize a new transport from provided memory. */ 40 static struct sctp_transport *sctp_transport_init(struct net *net, 41 struct sctp_transport *peer, 42 const union sctp_addr *addr, 43 gfp_t gfp) 44 { 45 /* Copy in the address. */ 46 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); 47 memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len); 48 memset(&peer->saddr, 0, sizeof(union sctp_addr)); 49 50 peer->sack_generation = 0; 51 52 /* From 6.3.1 RTO Calculation: 53 * 54 * C1) Until an RTT measurement has been made for a packet sent to the 55 * given destination transport address, set RTO to the protocol 56 * parameter 'RTO.Initial'. 57 */ 58 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 59 60 peer->last_time_heard = 0; 61 peer->last_time_ecne_reduced = jiffies; 62 63 peer->param_flags = SPP_HB_DISABLE | 64 SPP_PMTUD_ENABLE | 65 SPP_SACKDELAY_ENABLE; 66 67 /* Initialize the default path max_retrans. */ 68 peer->pathmaxrxt = net->sctp.max_retrans_path; 69 peer->pf_retrans = net->sctp.pf_retrans; 70 71 INIT_LIST_HEAD(&peer->transmitted); 72 INIT_LIST_HEAD(&peer->send_ready); 73 INIT_LIST_HEAD(&peer->transports); 74 75 timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0); 76 timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0); 77 timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0); 78 timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0); 79 timer_setup(&peer->proto_unreach_timer, 80 sctp_generate_proto_unreach_event, 0); 81 82 /* Initialize the 64-bit random nonce sent with heartbeat. */ 83 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 84 85 refcount_set(&peer->refcnt, 1); 86 87 return peer; 88 } 89 90 /* Allocate and initialize a new transport. */ 91 struct sctp_transport *sctp_transport_new(struct net *net, 92 const union sctp_addr *addr, 93 gfp_t gfp) 94 { 95 struct sctp_transport *transport; 96 97 transport = kzalloc(sizeof(*transport), gfp); 98 if (!transport) 99 goto fail; 100 101 if (!sctp_transport_init(net, transport, addr, gfp)) 102 goto fail_init; 103 104 SCTP_DBG_OBJCNT_INC(transport); 105 106 return transport; 107 108 fail_init: 109 kfree(transport); 110 111 fail: 112 return NULL; 113 } 114 115 /* This transport is no longer needed. Free up if possible, or 116 * delay until it last reference count. 117 */ 118 void sctp_transport_free(struct sctp_transport *transport) 119 { 120 /* Try to delete the heartbeat timer. */ 121 if (del_timer(&transport->hb_timer)) 122 sctp_transport_put(transport); 123 124 /* Delete the T3_rtx timer if it's active. 125 * There is no point in not doing this now and letting 126 * structure hang around in memory since we know 127 * the transport is going away. 128 */ 129 if (del_timer(&transport->T3_rtx_timer)) 130 sctp_transport_put(transport); 131 132 if (del_timer(&transport->reconf_timer)) 133 sctp_transport_put(transport); 134 135 if (del_timer(&transport->probe_timer)) 136 sctp_transport_put(transport); 137 138 /* Delete the ICMP proto unreachable timer if it's active. */ 139 if (del_timer(&transport->proto_unreach_timer)) 140 sctp_transport_put(transport); 141 142 sctp_transport_put(transport); 143 } 144 145 static void sctp_transport_destroy_rcu(struct rcu_head *head) 146 { 147 struct sctp_transport *transport; 148 149 transport = container_of(head, struct sctp_transport, rcu); 150 151 dst_release(transport->dst); 152 kfree(transport); 153 SCTP_DBG_OBJCNT_DEC(transport); 154 } 155 156 /* Destroy the transport data structure. 157 * Assumes there are no more users of this structure. 158 */ 159 static void sctp_transport_destroy(struct sctp_transport *transport) 160 { 161 if (unlikely(refcount_read(&transport->refcnt))) { 162 WARN(1, "Attempt to destroy undead transport %p!\n", transport); 163 return; 164 } 165 166 sctp_packet_free(&transport->packet); 167 168 if (transport->asoc) 169 sctp_association_put(transport->asoc); 170 171 call_rcu(&transport->rcu, sctp_transport_destroy_rcu); 172 } 173 174 /* Start T3_rtx timer if it is not already running and update the heartbeat 175 * timer. This routine is called every time a DATA chunk is sent. 176 */ 177 void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) 178 { 179 /* RFC 2960 6.3.2 Retransmission Timer Rules 180 * 181 * R1) Every time a DATA chunk is sent to any address(including a 182 * retransmission), if the T3-rtx timer of that address is not running 183 * start it running so that it will expire after the RTO of that 184 * address. 185 */ 186 187 if (!timer_pending(&transport->T3_rtx_timer)) 188 if (!mod_timer(&transport->T3_rtx_timer, 189 jiffies + transport->rto)) 190 sctp_transport_hold(transport); 191 } 192 193 void sctp_transport_reset_hb_timer(struct sctp_transport *transport) 194 { 195 unsigned long expires; 196 197 /* When a data chunk is sent, reset the heartbeat interval. */ 198 expires = jiffies + sctp_transport_timeout(transport); 199 if (!mod_timer(&transport->hb_timer, 200 expires + get_random_u32_below(transport->rto))) 201 sctp_transport_hold(transport); 202 } 203 204 void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) 205 { 206 if (!timer_pending(&transport->reconf_timer)) 207 if (!mod_timer(&transport->reconf_timer, 208 jiffies + transport->rto)) 209 sctp_transport_hold(transport); 210 } 211 212 void sctp_transport_reset_probe_timer(struct sctp_transport *transport) 213 { 214 if (!mod_timer(&transport->probe_timer, 215 jiffies + transport->probe_interval)) 216 sctp_transport_hold(transport); 217 } 218 219 void sctp_transport_reset_raise_timer(struct sctp_transport *transport) 220 { 221 if (!mod_timer(&transport->probe_timer, 222 jiffies + transport->probe_interval * 30)) 223 sctp_transport_hold(transport); 224 } 225 226 /* This transport has been assigned to an association. 227 * Initialize fields from the association or from the sock itself. 228 * Register the reference count in the association. 229 */ 230 void sctp_transport_set_owner(struct sctp_transport *transport, 231 struct sctp_association *asoc) 232 { 233 transport->asoc = asoc; 234 sctp_association_hold(asoc); 235 } 236 237 /* Initialize the pmtu of a transport. */ 238 void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) 239 { 240 /* If we don't have a fresh route, look one up */ 241 if (!transport->dst || transport->dst->obsolete) { 242 sctp_transport_dst_release(transport); 243 transport->af_specific->get_dst(transport, &transport->saddr, 244 &transport->fl, sk); 245 } 246 247 if (transport->param_flags & SPP_PMTUD_DISABLE) { 248 struct sctp_association *asoc = transport->asoc; 249 250 if (!transport->pathmtu && asoc && asoc->pathmtu) 251 transport->pathmtu = asoc->pathmtu; 252 if (transport->pathmtu) 253 return; 254 } 255 256 if (transport->dst) 257 transport->pathmtu = sctp_dst_mtu(transport->dst); 258 else 259 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 260 261 sctp_transport_pl_update(transport); 262 } 263 264 void sctp_transport_pl_send(struct sctp_transport *t) 265 { 266 if (t->pl.probe_count < SCTP_MAX_PROBES) 267 goto out; 268 269 t->pl.probe_count = 0; 270 if (t->pl.state == SCTP_PL_BASE) { 271 if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ 272 t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ 273 274 t->pl.pmtu = SCTP_BASE_PLPMTU; 275 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 276 sctp_assoc_sync_pmtu(t->asoc); 277 } 278 } else if (t->pl.state == SCTP_PL_SEARCH) { 279 if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ 280 t->pl.state = SCTP_PL_BASE; /* Search -> Base */ 281 t->pl.probe_size = SCTP_BASE_PLPMTU; 282 t->pl.probe_high = 0; 283 284 t->pl.pmtu = SCTP_BASE_PLPMTU; 285 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 286 sctp_assoc_sync_pmtu(t->asoc); 287 } else { /* Normal probe failure. */ 288 t->pl.probe_high = t->pl.probe_size; 289 t->pl.probe_size = t->pl.pmtu; 290 } 291 } else if (t->pl.state == SCTP_PL_COMPLETE) { 292 if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ 293 t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */ 294 t->pl.probe_size = SCTP_BASE_PLPMTU; 295 296 t->pl.pmtu = SCTP_BASE_PLPMTU; 297 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 298 sctp_assoc_sync_pmtu(t->asoc); 299 } 300 } 301 302 out: 303 pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", 304 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); 305 t->pl.probe_count++; 306 } 307 308 bool sctp_transport_pl_recv(struct sctp_transport *t) 309 { 310 pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", 311 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); 312 313 t->pl.pmtu = t->pl.probe_size; 314 t->pl.probe_count = 0; 315 if (t->pl.state == SCTP_PL_BASE) { 316 t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */ 317 t->pl.probe_size += SCTP_PL_BIG_STEP; 318 } else if (t->pl.state == SCTP_PL_ERROR) { 319 t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */ 320 321 t->pl.pmtu = t->pl.probe_size; 322 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 323 sctp_assoc_sync_pmtu(t->asoc); 324 t->pl.probe_size += SCTP_PL_BIG_STEP; 325 } else if (t->pl.state == SCTP_PL_SEARCH) { 326 if (!t->pl.probe_high) { 327 t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, 328 SCTP_MAX_PLPMTU); 329 return false; 330 } 331 t->pl.probe_size += SCTP_PL_MIN_STEP; 332 if (t->pl.probe_size >= t->pl.probe_high) { 333 t->pl.probe_high = 0; 334 t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ 335 336 t->pl.probe_size = t->pl.pmtu; 337 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 338 sctp_assoc_sync_pmtu(t->asoc); 339 sctp_transport_reset_raise_timer(t); 340 } 341 } else if (t->pl.state == SCTP_PL_COMPLETE) { 342 /* Raise probe_size again after 30 * interval in Search Complete */ 343 t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ 344 t->pl.probe_size += SCTP_PL_MIN_STEP; 345 } 346 347 return t->pl.state == SCTP_PL_COMPLETE; 348 } 349 350 static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) 351 { 352 pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n", 353 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu); 354 355 if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size) 356 return false; 357 358 if (t->pl.state == SCTP_PL_BASE) { 359 if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) { 360 t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ 361 362 t->pl.pmtu = SCTP_BASE_PLPMTU; 363 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 364 return true; 365 } 366 } else if (t->pl.state == SCTP_PL_SEARCH) { 367 if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { 368 t->pl.state = SCTP_PL_BASE; /* Search -> Base */ 369 t->pl.probe_size = SCTP_BASE_PLPMTU; 370 t->pl.probe_count = 0; 371 372 t->pl.probe_high = 0; 373 t->pl.pmtu = SCTP_BASE_PLPMTU; 374 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 375 return true; 376 } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) { 377 t->pl.probe_size = pmtu; 378 t->pl.probe_count = 0; 379 } 380 } else if (t->pl.state == SCTP_PL_COMPLETE) { 381 if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { 382 t->pl.state = SCTP_PL_BASE; /* Complete -> Base */ 383 t->pl.probe_size = SCTP_BASE_PLPMTU; 384 t->pl.probe_count = 0; 385 386 t->pl.probe_high = 0; 387 t->pl.pmtu = SCTP_BASE_PLPMTU; 388 t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); 389 sctp_transport_reset_probe_timer(t); 390 return true; 391 } 392 } 393 394 return false; 395 } 396 397 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 398 { 399 struct sock *sk = t->asoc->base.sk; 400 struct dst_entry *dst; 401 bool change = true; 402 403 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 404 pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n", 405 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); 406 /* Use default minimum segment instead */ 407 pmtu = SCTP_DEFAULT_MINSEGMENT; 408 } 409 pmtu = SCTP_TRUNC4(pmtu); 410 411 if (sctp_transport_pl_enabled(t)) 412 return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t)); 413 414 dst = sctp_transport_dst_check(t); 415 if (dst) { 416 struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); 417 union sctp_addr addr; 418 419 pf->af->from_sk(&addr, sk); 420 pf->to_sk_daddr(&t->ipaddr, sk); 421 dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); 422 pf->to_sk_daddr(&addr, sk); 423 424 dst = sctp_transport_dst_check(t); 425 } 426 427 if (!dst) { 428 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); 429 dst = t->dst; 430 } 431 432 if (dst) { 433 /* Re-fetch, as under layers may have a higher minimum size */ 434 pmtu = sctp_dst_mtu(dst); 435 change = t->pathmtu != pmtu; 436 } 437 t->pathmtu = pmtu; 438 439 return change; 440 } 441 442 /* Caches the dst entry and source address for a transport's destination 443 * address. 444 */ 445 void sctp_transport_route(struct sctp_transport *transport, 446 union sctp_addr *saddr, struct sctp_sock *opt) 447 { 448 struct sctp_association *asoc = transport->asoc; 449 struct sctp_af *af = transport->af_specific; 450 451 sctp_transport_dst_release(transport); 452 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); 453 454 if (saddr) 455 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 456 else 457 af->get_saddr(opt, transport, &transport->fl); 458 459 sctp_transport_pmtu(transport, sctp_opt2sk(opt)); 460 461 /* Initialize sk->sk_rcv_saddr, if the transport is the 462 * association's active path for getsockname(). 463 */ 464 if (transport->dst && asoc && 465 (!asoc->peer.primary_path || transport == asoc->peer.active_path)) 466 opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk); 467 } 468 469 /* Hold a reference to a transport. */ 470 int sctp_transport_hold(struct sctp_transport *transport) 471 { 472 return refcount_inc_not_zero(&transport->refcnt); 473 } 474 475 /* Release a reference to a transport and clean up 476 * if there are no more references. 477 */ 478 void sctp_transport_put(struct sctp_transport *transport) 479 { 480 if (refcount_dec_and_test(&transport->refcnt)) 481 sctp_transport_destroy(transport); 482 } 483 484 /* Update transport's RTO based on the newly calculated RTT. */ 485 void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) 486 { 487 if (unlikely(!tp->rto_pending)) 488 /* We should not be doing any RTO updates unless rto_pending is set. */ 489 pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); 490 491 if (tp->rttvar || tp->srtt) { 492 struct net *net = tp->asoc->base.net; 493 /* 6.3.1 C3) When a new RTT measurement R' is made, set 494 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| 495 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' 496 */ 497 498 /* Note: The above algorithm has been rewritten to 499 * express rto_beta and rto_alpha as inverse powers 500 * of two. 501 * For example, assuming the default value of RTO.Alpha of 502 * 1/8, rto_alpha would be expressed as 3. 503 */ 504 tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) 505 + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); 506 tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) 507 + (rtt >> net->sctp.rto_alpha); 508 } else { 509 /* 6.3.1 C2) When the first RTT measurement R is made, set 510 * SRTT <- R, RTTVAR <- R/2. 511 */ 512 tp->srtt = rtt; 513 tp->rttvar = rtt >> 1; 514 } 515 516 /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then 517 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. 518 */ 519 if (tp->rttvar == 0) 520 tp->rttvar = SCTP_CLOCK_GRANULARITY; 521 522 /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ 523 tp->rto = tp->srtt + (tp->rttvar << 2); 524 525 /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min 526 * seconds then it is rounded up to RTO.Min seconds. 527 */ 528 if (tp->rto < tp->asoc->rto_min) 529 tp->rto = tp->asoc->rto_min; 530 531 /* 6.3.1 C7) A maximum value may be placed on RTO provided it is 532 * at least RTO.max seconds. 533 */ 534 if (tp->rto > tp->asoc->rto_max) 535 tp->rto = tp->asoc->rto_max; 536 537 sctp_max_rto(tp->asoc, tp); 538 tp->rtt = rtt; 539 540 /* Reset rto_pending so that a new RTT measurement is started when a 541 * new data chunk is sent. 542 */ 543 tp->rto_pending = 0; 544 545 pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n", 546 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); 547 } 548 549 /* This routine updates the transport's cwnd and partial_bytes_acked 550 * parameters based on the bytes acked in the received SACK. 551 */ 552 void sctp_transport_raise_cwnd(struct sctp_transport *transport, 553 __u32 sack_ctsn, __u32 bytes_acked) 554 { 555 struct sctp_association *asoc = transport->asoc; 556 __u32 cwnd, ssthresh, flight_size, pba, pmtu; 557 558 cwnd = transport->cwnd; 559 flight_size = transport->flight_size; 560 561 /* See if we need to exit Fast Recovery first */ 562 if (asoc->fast_recovery && 563 TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) 564 asoc->fast_recovery = 0; 565 566 ssthresh = transport->ssthresh; 567 pba = transport->partial_bytes_acked; 568 pmtu = transport->asoc->pathmtu; 569 570 if (cwnd <= ssthresh) { 571 /* RFC 4960 7.2.1 572 * o When cwnd is less than or equal to ssthresh, an SCTP 573 * endpoint MUST use the slow-start algorithm to increase 574 * cwnd only if the current congestion window is being fully 575 * utilized, an incoming SACK advances the Cumulative TSN 576 * Ack Point, and the data sender is not in Fast Recovery. 577 * Only when these three conditions are met can the cwnd be 578 * increased; otherwise, the cwnd MUST not be increased. 579 * If these conditions are met, then cwnd MUST be increased 580 * by, at most, the lesser of 1) the total size of the 581 * previously outstanding DATA chunk(s) acknowledged, and 582 * 2) the destination's path MTU. This upper bound protects 583 * against the ACK-Splitting attack outlined in [SAVAGE99]. 584 */ 585 if (asoc->fast_recovery) 586 return; 587 588 /* The appropriate cwnd increase algorithm is performed 589 * if, and only if the congestion window is being fully 590 * utilized. Note that RFC4960 Errata 3.22 removed the 591 * other condition on ctsn moving. 592 */ 593 if (flight_size < cwnd) 594 return; 595 596 if (bytes_acked > pmtu) 597 cwnd += pmtu; 598 else 599 cwnd += bytes_acked; 600 601 pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " 602 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", 603 __func__, transport, bytes_acked, cwnd, ssthresh, 604 flight_size, pba); 605 } else { 606 /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, 607 * upon each SACK arrival, increase partial_bytes_acked 608 * by the total number of bytes of all new chunks 609 * acknowledged in that SACK including chunks 610 * acknowledged by the new Cumulative TSN Ack and by Gap 611 * Ack Blocks. (updated by RFC4960 Errata 3.22) 612 * 613 * When partial_bytes_acked is greater than cwnd and 614 * before the arrival of the SACK the sender had less 615 * bytes of data outstanding than cwnd (i.e., before 616 * arrival of the SACK, flightsize was less than cwnd), 617 * reset partial_bytes_acked to cwnd. (RFC 4960 Errata 618 * 3.26) 619 * 620 * When partial_bytes_acked is equal to or greater than 621 * cwnd and before the arrival of the SACK the sender 622 * had cwnd or more bytes of data outstanding (i.e., 623 * before arrival of the SACK, flightsize was greater 624 * than or equal to cwnd), partial_bytes_acked is reset 625 * to (partial_bytes_acked - cwnd). Next, cwnd is 626 * increased by MTU. (RFC 4960 Errata 3.12) 627 */ 628 pba += bytes_acked; 629 if (pba > cwnd && flight_size < cwnd) 630 pba = cwnd; 631 if (pba >= cwnd && flight_size >= cwnd) { 632 pba = pba - cwnd; 633 cwnd += pmtu; 634 } 635 636 pr_debug("%s: congestion avoidance: transport:%p, " 637 "bytes_acked:%d, cwnd:%d, ssthresh:%d, " 638 "flight_size:%d, pba:%d\n", __func__, 639 transport, bytes_acked, cwnd, ssthresh, 640 flight_size, pba); 641 } 642 643 transport->cwnd = cwnd; 644 transport->partial_bytes_acked = pba; 645 } 646 647 /* This routine is used to lower the transport's cwnd when congestion is 648 * detected. 649 */ 650 void sctp_transport_lower_cwnd(struct sctp_transport *transport, 651 enum sctp_lower_cwnd reason) 652 { 653 struct sctp_association *asoc = transport->asoc; 654 655 switch (reason) { 656 case SCTP_LOWER_CWND_T3_RTX: 657 /* RFC 2960 Section 7.2.3, sctpimpguide 658 * When the T3-rtx timer expires on an address, SCTP should 659 * perform slow start by: 660 * ssthresh = max(cwnd/2, 4*MTU) 661 * cwnd = 1*MTU 662 * partial_bytes_acked = 0 663 */ 664 transport->ssthresh = max(transport->cwnd/2, 665 4*asoc->pathmtu); 666 transport->cwnd = asoc->pathmtu; 667 668 /* T3-rtx also clears fast recovery */ 669 asoc->fast_recovery = 0; 670 break; 671 672 case SCTP_LOWER_CWND_FAST_RTX: 673 /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the 674 * destination address(es) to which the missing DATA chunks 675 * were last sent, according to the formula described in 676 * Section 7.2.3. 677 * 678 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet 679 * losses from SACK (see Section 7.2.4), An endpoint 680 * should do the following: 681 * ssthresh = max(cwnd/2, 4*MTU) 682 * cwnd = ssthresh 683 * partial_bytes_acked = 0 684 */ 685 if (asoc->fast_recovery) 686 return; 687 688 /* Mark Fast recovery */ 689 asoc->fast_recovery = 1; 690 asoc->fast_recovery_exit = asoc->next_tsn - 1; 691 692 transport->ssthresh = max(transport->cwnd/2, 693 4*asoc->pathmtu); 694 transport->cwnd = transport->ssthresh; 695 break; 696 697 case SCTP_LOWER_CWND_ECNE: 698 /* RFC 2481 Section 6.1.2. 699 * If the sender receives an ECN-Echo ACK packet 700 * then the sender knows that congestion was encountered in the 701 * network on the path from the sender to the receiver. The 702 * indication of congestion should be treated just as a 703 * congestion loss in non-ECN Capable TCP. That is, the TCP 704 * source halves the congestion window "cwnd" and reduces the 705 * slow start threshold "ssthresh". 706 * A critical condition is that TCP does not react to 707 * congestion indications more than once every window of 708 * data (or more loosely more than once every round-trip time). 709 */ 710 if (time_after(jiffies, transport->last_time_ecne_reduced + 711 transport->rtt)) { 712 transport->ssthresh = max(transport->cwnd/2, 713 4*asoc->pathmtu); 714 transport->cwnd = transport->ssthresh; 715 transport->last_time_ecne_reduced = jiffies; 716 } 717 break; 718 719 case SCTP_LOWER_CWND_INACTIVE: 720 /* RFC 2960 Section 7.2.1, sctpimpguide 721 * When the endpoint does not transmit data on a given 722 * transport address, the cwnd of the transport address 723 * should be adjusted to max(cwnd/2, 4*MTU) per RTO. 724 * NOTE: Although the draft recommends that this check needs 725 * to be done every RTO interval, we do it every hearbeat 726 * interval. 727 */ 728 transport->cwnd = max(transport->cwnd/2, 729 4*asoc->pathmtu); 730 /* RFC 4960 Errata 3.27.2: also adjust sshthresh */ 731 transport->ssthresh = transport->cwnd; 732 break; 733 } 734 735 transport->partial_bytes_acked = 0; 736 737 pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", 738 __func__, transport, reason, transport->cwnd, 739 transport->ssthresh); 740 } 741 742 /* Apply Max.Burst limit to the congestion window: 743 * sctpimpguide-05 2.14.2 744 * D) When the time comes for the sender to 745 * transmit new DATA chunks, the protocol parameter Max.Burst MUST 746 * first be applied to limit how many new DATA chunks may be sent. 747 * The limit is applied by adjusting cwnd as follows: 748 * if ((flightsize+ Max.Burst * MTU) < cwnd) 749 * cwnd = flightsize + Max.Burst * MTU 750 */ 751 752 void sctp_transport_burst_limited(struct sctp_transport *t) 753 { 754 struct sctp_association *asoc = t->asoc; 755 u32 old_cwnd = t->cwnd; 756 u32 max_burst_bytes; 757 758 if (t->burst_limited || asoc->max_burst == 0) 759 return; 760 761 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); 762 if (max_burst_bytes < old_cwnd) { 763 t->cwnd = max_burst_bytes; 764 t->burst_limited = old_cwnd; 765 } 766 } 767 768 /* Restore the old cwnd congestion window, after the burst had it's 769 * desired effect. 770 */ 771 void sctp_transport_burst_reset(struct sctp_transport *t) 772 { 773 if (t->burst_limited) { 774 t->cwnd = t->burst_limited; 775 t->burst_limited = 0; 776 } 777 } 778 779 /* What is the next timeout value for this transport? */ 780 unsigned long sctp_transport_timeout(struct sctp_transport *trans) 781 { 782 /* RTO + timer slack +/- 50% of RTO */ 783 unsigned long timeout = trans->rto >> 1; 784 785 if (trans->state != SCTP_UNCONFIRMED && 786 trans->state != SCTP_PF) 787 timeout += trans->hbinterval; 788 789 return max_t(unsigned long, timeout, HZ / 5); 790 } 791 792 /* Reset transport variables to their initial values */ 793 void sctp_transport_reset(struct sctp_transport *t) 794 { 795 struct sctp_association *asoc = t->asoc; 796 797 /* RFC 2960 (bis), Section 5.2.4 798 * All the congestion control parameters (e.g., cwnd, ssthresh) 799 * related to this peer MUST be reset to their initial values 800 * (see Section 6.2.1) 801 */ 802 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 803 t->burst_limited = 0; 804 t->ssthresh = asoc->peer.i.a_rwnd; 805 t->rto = asoc->rto_initial; 806 sctp_max_rto(asoc, t); 807 t->rtt = 0; 808 t->srtt = 0; 809 t->rttvar = 0; 810 811 /* Reset these additional variables so that we have a clean slate. */ 812 t->partial_bytes_acked = 0; 813 t->flight_size = 0; 814 t->error_count = 0; 815 t->rto_pending = 0; 816 t->hb_sent = 0; 817 818 /* Initialize the state information for SFR-CACC */ 819 t->cacc.changeover_active = 0; 820 t->cacc.cycling_changeover = 0; 821 t->cacc.next_tsn_at_change = 0; 822 t->cacc.cacc_saw_newack = 0; 823 } 824 825 /* Schedule retransmission on the given transport */ 826 void sctp_transport_immediate_rtx(struct sctp_transport *t) 827 { 828 /* Stop pending T3_rtx_timer */ 829 if (del_timer(&t->T3_rtx_timer)) 830 sctp_transport_put(t); 831 832 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); 833 if (!timer_pending(&t->T3_rtx_timer)) { 834 if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) 835 sctp_transport_hold(t); 836 } 837 } 838 839 /* Drop dst */ 840 void sctp_transport_dst_release(struct sctp_transport *t) 841 { 842 dst_release(t->dst); 843 t->dst = NULL; 844 t->dst_pending_confirm = 0; 845 } 846 847 /* Schedule neighbour confirm */ 848 void sctp_transport_dst_confirm(struct sctp_transport *t) 849 { 850 t->dst_pending_confirm = 1; 851 } 852