/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* Keep track of the new idr low so that we don't re-use association id
 * numbers too fast.  It is protected by the idr spin lock and is in the
 * range of 1 - INT_MAX.
 */
static u32 idr_low = 1;


/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock. */
	asoc->base.sk = (struct sock *)sk;
	sock_hold(asoc->base.sk);
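	/* Note: the endpoint and socket references taken above live for
	 * the lifetime of the association; they are dropped either on the
	 * fail_init error path below or in sctp_association_destroy().
	 */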
	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = 0;
	asoc->base.malloced = 0;

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;

	/* Set these values from the socket values; a conversion from
	 * milliseconds to seconds/microseconds must also be done.
	 */
	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
					* 1000;
	asoc->frag_point = 0;
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);
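	/* Note: by convention, a running association timer holds a reference
	 * on the association (taken where the timer is started, outside this
	 * file), which is why a successful del_timer() in
	 * sctp_association_free() below is paired with sctp_association_put().
	 */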
	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Allocate storage for the ssnmap after the inbound and outbound
	 * streams have been negotiated during Init.
	 */
	asoc->ssnmap = NULL;

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960 6.1 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	asoc->rwnd_over = 0;
	asoc->rwnd_press = 0;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Set the sndbuf size for transmit. */
	asoc->sndbuf_used = 0;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
	asoc->c.peer_vtag = 0;
	asoc->c.my_ttag = 0;
	asoc->c.peer_ttag = 0;
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
	asoc->unack_data = 0;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
	asoc->peer.transport_count = 0;

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_cnt = 0;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if it recognizes ASCONF
	 * as part of the INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert to the old behavior.
	 */
	asoc->peer.asconf_capable = 0;
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;
	asoc->asconf_addr_del_pending = NULL;
	asoc->src_out_of_asoc_ok = 0;
	asoc->new_transport = NULL;
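	/* The inqueue set up below is drained by sctp_assoc_bh_rcv(),
	 * defined later in this file; sctp_rcv() queues inbound chunks and
	 * schedules that handler for delayed processing.
	 */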
	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));

	asoc->need_ecne = 0;

	asoc->assoc_id = 0;

	/* Assume that the peer will support both address types unless we
	 * are told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->autoclose = sp->autoclose;

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* SCTP_GET_ASSOC_STATS COUNTERS */
	memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->asoc_shared_key = NULL;

	asoc->default_hmac_id = 0;
	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					 const struct sock *sk,
					 sctp_scope_t scope,
					 gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = t_new(struct sctp_association, gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	asoc->base.malloced = 1;
	SCTP_DBG_OBJCNT_INC(assoc);
	SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!asoc->temp) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}
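	/* From here on this is pure teardown; the sctp_association_put()
	 * at the end of this function drops the initial reference set up
	 * in sctp_association_init(), and sctp_association_destroy() runs
	 * once the last reference is gone.
	 */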
	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = 1;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	if (asoc->asconf_addr_del_pending != NULL)
		kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	if (asoc->base.malloced) {
		kfree(asoc);
		SCTP_DBG_OBJCNT_DEC(assoc);
	}
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* It's a changeover only if we already have a primary path
	 * that we are changing.
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;
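	/* Illustrative scenario for the SFR-CACC bookkeeping below: if the
	 * primary is switched from A to B, back to A, and then to B again
	 * while data is outstanding, the second switch to B finds
	 * changeover_active already set on B and marks cycling_changeover,
	 * flagging a double switch to the same destination.
	 */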
	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head *pos;
	struct sctp_transport *transport;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&peer->ipaddr),
				 ntohs(peer->ipaddr.v4.sin_port));

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport a SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);
		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
				 " port: %d state:%d\n",
				 asoc,
				 addr,
				 port,
				 peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * the user in a sctp_connectx() call.  Such transports
		 * should be considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pathmtu);
	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
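	/* For example, with an association PMTU of 1500 the frag_point
	 * works out to somewhat less than 1500 once sctp_frag_point()
	 * subtracts the IP/SCTP header and DATA chunk overhead; user
	 * messages larger than this get fragmented into multiple chunks.
	 */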
	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one. */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}
/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_transport *t = NULL;
	struct sctp_transport *first;
	struct sctp_transport *second;
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = 1;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
	 * user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);
		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */

	/* Look for the two most recently used active transports.
	 *
	 * This code produces the wrong ordering whenever jiffies
	 * rolls over, but we still get usable transports, so we don't
	 * worry about it.
	 */
	first = NULL; second = NULL;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {

		if ((t->state == SCTP_INACTIVE) ||
		    (t->state == SCTP_UNCONFIRMED) ||
		    (t->state == SCTP_PF))
			continue;
		if (!first || t->last_time_heard > first->last_time_heard) {
			second = first;
			first = t;
		}
		if (!second || t->last_time_heard > second->last_time_heard)
			second = t;
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the
	 * primary path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source
	 * transport address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
	    first != asoc->peer.primary_path) {
		second = first;
		first = asoc->peer.primary_path;
	}
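	/* At this point "first" is the best candidate for the active path
	 * and "second" the next best; they become active_path and
	 * retran_path below, so retransmissions can prefer a different
	 * transport than new data when one is available.
	 */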
	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = first;
	asoc->peer.retran_path = second;
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	struct sctp_chunk *chunk;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	if (asoc->need_ecne)
		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
	else
		chunk = NULL;

	return chunk;
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.  Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}
	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			transports) {

		if (transport == active)
			break;
		list_for_each_entry(chunk, &transport->transmitted,
				transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
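/* Note: association migration is used, for example, when a TCP-style
 * listening socket accept()s, or when an association is peeled off to
 * its own socket; the association keeps its state but changes owners.
 */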
/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}
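		/* NB: this update path is typically invoked from the state
		 * machine and may run in atomic context, which is presumably
		 * why the id allocation below uses GFP_ATOMIC.
		 */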
		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new association
	 * and also move the association shared keys over.
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * Round-robin through the active transports, else round-robin
 * through the inactive transports as this is the next best thing
 * we can try.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	if (asoc->peer.transport_count == 1)
		return;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	next = NULL;

	while (1) {
		/* Skip the head. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* We have exhausted the list, but didn't find any
		 * other active transports.  If so, use the next
		 * transport.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}

		/* Try to find an active transport. */

		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (t->state != SCTP_UNCONFIRMED && !next)
				next = t;
		}
	}

	if (t)
		asoc->peer.retran_path = t;
	else
		t = asoc->peer.retran_path;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
				 " %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&t->ipaddr),
				 ntohs(t->ipaddr.v4.sin_port));
}

/* Choose the transport for sending a retransmitted packet. */
struct sctp_transport *sctp_assoc_choose_alter_transport(
	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
{
	/* If this is the first time a packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (!last_sent_to)
		return asoc->peer.active_path;
	else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);
		return asoc->peer.retran_path;
	}
}

/* Update the association's pmtu and frag_point by going through all the
 * transports.  This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;
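	/* E.g., with two transports whose PMTUs are 1500 and 1400, the
	 * loop below leaves asoc->pathmtu at 1400, and frag_point is
	 * recomputed to match.
	 */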
	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
				transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
			  __func__, asoc, asoc->pathmtu, asoc->frag_point);
}

/* Should we send a SACK to update our peer? */
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return 1;
		break;
	default:
		break;
	}
	return 0;
}

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
			  "- %u\n", __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;
		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __func__,
				  asoc, asoc->rwnd, asoc->a_rwnd);
		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
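	/* rcvbuf_policy selects the accounting scope: a non-zero value
	 * means receive space is tracked per association (rmem_alloc),
	 * otherwise it is shared across the whole socket (sk_rmem_alloc).
	 */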
	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}
	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
			  __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	int assoc_id;
	int error = 0;
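	/* Ids are handed out from idr_low upwards (wrapping at INT_MAX)
	 * so that recently freed ids are not immediately reused; see the
	 * idr_low comment at the top of this file.
	 */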
	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return error;
retry:
	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
		return -ENOMEM;

	spin_lock_bh(&sctp_assocs_id_lock);
	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
				  idr_low, &assoc_id);
	if (!error) {
		idr_low = assoc_id + 1;
		if (idr_low == INT_MAX)
			idr_low = 1;
	}
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	asoc->assoc_id = (sctp_assoc_t) assoc_id;
	return error;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free the asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}