/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp);

static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk. */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk. */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet. */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	struct sctp_transport *trans = asoc->peer.last_data_from;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *           : is to be responded to with a SACK. ...
	 *           : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.
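			 * (We test against sackfreq - 1 here because the
			 * packet that finally triggers the SACK is not
			 * itself counted in sack_cnt, so a SACK still goes
			 * out once every sackfreq packets.)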
			 */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	/* Check whether a task is in the sock. */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is a generic interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					sctp_event_timeout_t timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

static void sctp_generate_t1_cookie_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}


/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t4_rto_event,
	sctp_generate_t5_shutdown_guard_event,
	NULL,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	struct net *net = sock_net(asoc->base.sk);

	/* The check for the association's overall error counter exceeding
	 * the threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
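	 * (In the HEARTBEAT case an UNCONFIRMED transport does not add to
	 * the association's overall error count, and an INACTIVE transport
	 * never has its own error count bumped; see the checks below.)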
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (net->sctp.pf_enable &&
	    (transport->state == SCTP_ACTIVE) &&
	    (asoc->pf_retrans < transport->pathmaxrxt) &&
	    (transport->error_count > asoc->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_cmd_hb_timer_update(commands, transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case: the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be a real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 sctp_init_chunk_t *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a side effect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK.
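 * Clears the transport's error counters, marks it up if it was inactive
 * or unconfirmed, updates the RTT estimate from the timestamp carried in
 * the HEARTBEAT, and rearms the heartbeat timer.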
 */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING.  If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		dst_confirm(t->dst);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}


/* Helper function to process the SCTP_CMD_PROCESS_SACK command. */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
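 * If the chunk has not already been assigned a transport, pick an
 * alternate to the transport the previous SHUTDOWN was sent to.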
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
			       struct sctp_association *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED))
			sk->sk_shutdown |= RCV_SHUTDOWN;
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use the active path instead of the primary path
 * just because the primary path may be inactive).
 */
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk.
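 * Each error cause found in the chunk is reported to the user as an
 * SCTP_REMOTE_ERROR event; causes that additionally require protocol
 * action (currently only an unrecognized ASCONF chunk type) are handled
 * here as well.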
 */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			sctp_chunkhdr_t *unk_chunk_hdr;

			unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association's non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}


static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     sctp_event_timeout_t timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static int sctp_cmd_send_msg(struct sctp_association *asoc,
			     struct sctp_datamsg *msg)
{
	struct sctp_chunk *chunk;
	int error = 0;

	list_for_each_entry(chunk, &msg->chunks, frag_list) {
		error = sctp_outq_tail(&asoc->outqueue, chunk);
		if (error)
			break;
	}

	return error;
}


/* Send the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						struct sctp_chunk, list);
		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
	       gfp_t gfp)
{
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command.
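		 * The association is gone (or going away) by this point,
		 * so clear the caller's reference to it.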
		 */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	struct sctp_chunk *new_obj;
	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	struct sctp_sackhdr sackh;
	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}

			/* Register with the endpoint. */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios.  The following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If the caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			/* fall through */

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.  Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
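			 * (sctp_cmd_t1_timer_update() implements that check
			 * by comparing init_sent_count with init_cycle.)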
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.  Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.err);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = asoc->peer.rwnd +
					asoc->outqueue.outstanding_bytes;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			error = sctp_outq_uncork(&asoc->outqueue);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
			break;
		case SCTP_CMD_SEND_NEXT_ASCONF:
			sctp_cmd_send_asconf(asoc);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error)
			break;
	}

out:
	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			error = sctp_outq_uncork(&asoc->outqueue);
	} else if (local_cork)
		error = sctp_outq_uncork(&asoc->outqueue);

	return error;

nomem:
	error = -ENOMEM;
	goto out;
}