// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;
	asoc->base.net = sock_net(sk);

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = sp->pf_retrans;
	asoc->ps_retrans = sp->ps_retrans;
	asoc->pf_expose = sp->pf_expose;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	asoc->encap_port = sp->encap_port;

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	asoc->subscribe = sp->subscribe;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initialize the timers. */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960, Section 6.1 - A SCTP receiver MUST be able to receive
	 * a minimum of 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
			     0, gfp))
		goto fail_init;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;
	sctp_assoc_update_frag_point(asoc);

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}
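
/* Illustrative call site (a sketch only; the real callers live in
 * net/sctp/socket.c and in the state machine, not in this file):
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 */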

/* Free this association if possible. There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk_acceptq_removed(sk);
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them? To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree_rcu(asoc, rcu);
	SCTP_DBG_OBJCNT_DEC(assoc);
}
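
/* Note on the teardown split (summary added for orientation):
 * sctp_association_free() above unlinks the association and tears down its
 * queues while callers may still hold references; sctp_association_destroy()
 * runs only once the last reference is dropped via sctp_association_put(),
 * and the memory itself is reclaimed after an RCU grace period.
 */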

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;
	sctp_ulpevent_notify_peer_addr_change(transport,
					      SCTP_ADDR_MADE_PRIM, 0);

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct sctp_transport *transport;
	struct list_head *pos;
	struct sctp_chunk *ch;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off. The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
		if (ch->transport == peer)
			ch->transport = NULL;

	asoc->peer.transport_count--;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call. Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(asoc->base.net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	peer->encap_port = asoc->encap_port;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;
	/* And the primary path switchover retrans threshold */
	peer->ps_retrans = asoc->ps_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
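
/* Illustrative use (a sketch, not copied from this tree): the state machine
 * adds the addresses carried in a peer's INIT/INIT-ACK roughly as
 *
 *	if (!sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED))
 *		goto nomem;
 *
 * leaving the new transports UNCONFIRMED until a HEARTBEAT-ACK confirms
 * them (RFC 4960, Section 5.4).
 */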

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do bookkeeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  enum sctp_transport_cmd command,
				  sctp_sn_error_t error)
{
	int spc_state = SCTP_ADDR_AVAILABLE;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (transport->state == SCTP_PF &&
		    asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else if (transport->state == SCTP_UNCONFIRMED &&
			 error == SCTP_HEARTBEAT_SUCCESS)
			spc_state = SCTP_ADDR_CONFIRMED;

		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state. Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED) {
			transport->state = SCTP_INACTIVE;
			spc_state = SCTP_ADDR_UNREACHABLE;
		} else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else
			spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify)
		sctp_ulpevent_notify_peer_addr_change(transport,
						      spc_state, error);

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}
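
/* For orientation (callers live outside this file, e.g. in
 * net/sctp/sm_sideeffect.c): the state machine calls the routine above with
 * SCTP_TRANSPORT_UP when a HEARTBEAT-ACK or other proof of reachability
 * arrives, and with SCTP_TRANSPORT_PF or SCTP_TRANSPORT_DOWN once a
 * transport's error count crosses the pf_retrans/pathmaxrxt thresholds.
 */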

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (refcount_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match. Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk. FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list. Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 *
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {

		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Do delayed input processing. This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = asoc->base.net;
	union sctp_subtype subtype;
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int first_time = 1;	/* is this the first time through the loop */
	int error = 0;
	int state;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk. This list has
		 *    been sent to the peer during the association setup. It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk. If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;

		if (first_time)
			first_time = 0;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		sk_acceptq_removed(oldsk);

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
		      struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;

	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
		return -ENOMEM;

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(&asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports)
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
						 GFP_ATOMIC, trans->state))
				return -ENOMEM;

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

		if (sctp_state(asoc, COOKIE_WAIT))
			sctp_stream_update(&asoc->stream, &new->stream);

		/* get a new assoc id if we don't have one yet. */
		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
			return -ENOMEM;
	}

	/* SCTP-AUTH: Save the peer parameters from the new association
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, let's try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(t,
						   atomic_read(&t->mtu_info));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = asoc->base.net;

	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}
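
/* Worked example (illustrative figures, not taken from this file): with,
 * say, a 212992 byte sk_rcvbuf, a rwnd_upd_shift of 4 and a 1500 byte path
 * MTU, sctp_peer_needs_update() asks for a window update SACK once rwnd
 * exceeds the last advertised a_rwnd by at least
 * max(212992 >> 4, 1500) = 13312 bytes.
 */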

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold. The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);

		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least
	 * the larger of the association's PMTU and a fraction
	 * (sk_rcvbuf >> rwnd_upd_shift) of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive. Store the
	 * potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     enum sctp_scope scope, gfp_t gfp)
{
	struct sock *sk = asoc->base.sk;
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (!inet_v6_ipv6only(sk))
		flags |= SCTP_ADDR4_ALLOWED;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(asoc->base.net,
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
	 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
	 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
			       GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}