1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* SCTP kernel implementation 3 * (C) Copyright IBM Corp. 2001, 2004 4 * Copyright (c) 1999-2000 Cisco, Inc. 5 * Copyright (c) 1999-2001 Motorola, Inc. 6 * Copyright (c) 2001-2002 Intel Corp. 7 * Copyright (c) 2002 Nokia Corp. 8 * 9 * This is part of the SCTP Linux Kernel Implementation. 10 * 11 * These are the state functions for the state machine. 12 * 13 * Please send any bug reports or fixes you make to the 14 * email address(es): 15 * lksctp developers <linux-sctp@vger.kernel.org> 16 * 17 * Written or modified by: 18 * La Monte H.P. Yarroll <piggy@acm.org> 19 * Karl Knutson <karl@athena.chicago.il.us> 20 * Mathew Kotowsky <kotowsky@sctp.org> 21 * Sridhar Samudrala <samudrala@us.ibm.com> 22 * Jon Grimm <jgrimm@us.ibm.com> 23 * Hui Huang <hui.huang@nokia.com> 24 * Dajiang Zhang <dajiang.zhang@nokia.com> 25 * Daisy Chang <daisyc@us.ibm.com> 26 * Ardelle Fan <ardelle.fan@intel.com> 27 * Ryan Layer <rmlayer@us.ibm.com> 28 * Kevin Gao <kevin.gao@intel.com> 29 */ 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/types.h> 34 #include <linux/kernel.h> 35 #include <linux/ip.h> 36 #include <linux/ipv6.h> 37 #include <linux/net.h> 38 #include <linux/inet.h> 39 #include <linux/slab.h> 40 #include <net/sock.h> 41 #include <net/inet_ecn.h> 42 #include <linux/skbuff.h> 43 #include <net/sctp/sctp.h> 44 #include <net/sctp/sm.h> 45 #include <net/sctp/structs.h> 46 47 #define CREATE_TRACE_POINTS 48 #include <trace/events/sctp.h> 49 50 static struct sctp_packet *sctp_abort_pkt_new( 51 struct net *net, 52 const struct sctp_endpoint *ep, 53 const struct sctp_association *asoc, 54 struct sctp_chunk *chunk, 55 const void *payload, size_t paylen); 56 static int sctp_eat_data(const struct sctp_association *asoc, 57 struct sctp_chunk *chunk, 58 struct sctp_cmd_seq *commands); 59 static struct sctp_packet *sctp_ootb_pkt_new( 60 struct net *net, 61 const struct sctp_association *asoc, 62 const struct sctp_chunk *chunk); 
63 static void sctp_send_stale_cookie_err(struct net *net, 64 const struct sctp_endpoint *ep, 65 const struct sctp_association *asoc, 66 const struct sctp_chunk *chunk, 67 struct sctp_cmd_seq *commands, 68 struct sctp_chunk *err_chunk); 69 static enum sctp_disposition sctp_sf_do_5_2_6_stale( 70 struct net *net, 71 const struct sctp_endpoint *ep, 72 const struct sctp_association *asoc, 73 const union sctp_subtype type, 74 void *arg, 75 struct sctp_cmd_seq *commands); 76 static enum sctp_disposition sctp_sf_shut_8_4_5( 77 struct net *net, 78 const struct sctp_endpoint *ep, 79 const struct sctp_association *asoc, 80 const union sctp_subtype type, 81 void *arg, 82 struct sctp_cmd_seq *commands); 83 static enum sctp_disposition sctp_sf_tabort_8_4_8( 84 struct net *net, 85 const struct sctp_endpoint *ep, 86 const struct sctp_association *asoc, 87 const union sctp_subtype type, 88 void *arg, 89 struct sctp_cmd_seq *commands); 90 static enum sctp_disposition sctp_sf_new_encap_port( 91 struct net *net, 92 const struct sctp_endpoint *ep, 93 const struct sctp_association *asoc, 94 const union sctp_subtype type, 95 void *arg, 96 struct sctp_cmd_seq *commands); 97 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 98 99 static enum sctp_disposition sctp_stop_t1_and_abort( 100 struct net *net, 101 struct sctp_cmd_seq *commands, 102 __be16 error, int sk_err, 103 const struct sctp_association *asoc, 104 struct sctp_transport *transport); 105 106 static enum sctp_disposition sctp_sf_abort_violation( 107 struct net *net, 108 const struct sctp_endpoint *ep, 109 const struct sctp_association *asoc, 110 void *arg, 111 struct sctp_cmd_seq *commands, 112 const __u8 *payload, 113 const size_t paylen); 114 115 static enum sctp_disposition sctp_sf_violation_chunklen( 116 struct net *net, 117 const struct sctp_endpoint *ep, 118 const struct sctp_association *asoc, 119 const union sctp_subtype type, 120 void *arg, 121 struct sctp_cmd_seq *commands); 122 123 static enum 
sctp_disposition sctp_sf_violation_paramlen(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg, void *ext,
					struct sctp_cmd_seq *commands);

static enum sctp_disposition sctp_sf_violation_ctsn(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands);

static enum sctp_disposition sctp_sf_violation_chunk(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands);

static enum sctp_ierror sctp_sf_authenticate(
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk);

static enum sctp_disposition __sctp_sf_do_9_1_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands);

static enum sctp_disposition
__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
			   const struct sctp_association *asoc,
			   const union sctp_subtype type, void *arg,
			   struct sctp_cmd_seq *commands);

/* Small helper function that checks if the chunk length
 * is of the appropriate length.  The 'required_length' argument
 * is set to be the size of a specific chunk we are testing.
 * Return Values: true  = Valid length
 *		  false = Invalid length
 *
 */
static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
					   __u16 required_length)
{
	__u16 chunk_length = ntohs(chunk->chunk_hdr->length);

	/* Previously already marked?  A chunk flagged for discard can
	 * never validate, regardless of its advertised length.
	 */
	if (unlikely(chunk->pdiscard))
		return false;
	if (unlikely(chunk_length < required_length))
		return false;

	return true;
}

/* Check for format error in an ABORT chunk.  Walk every error cause;
 * a well-formed chunk leaves the walk cursor exactly at chunk_end.
 */
static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err;

	sctp_walk_errors(err, chunk->chunk_hdr);

	return (void *)err == (void *)chunk->chunk_end;
}

/**********************************************************
 * These are the state functions for handling chunk events.
 **********************************************************/

/*
 * Process the final SHUTDOWN COMPLETE.
 *
 * Section: 4 (C) (diagram), 9.2
 * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
 * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
 * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
 * should stop the T2-shutdown timer and remove all knowledge of the
 * association (and thus the association enters the CLOSED state).
 *
 * Verification Tag: 8.5.1(C), sctpimpguide 2.41.
 * C) Rules for packet carrying SHUTDOWN COMPLETE:
 * ...
 * - The receiver of a SHUTDOWN COMPLETE shall accept the packet
 *   if the Verification Tag field of the packet matches its own tag and
 *   the T bit is not set
 *   OR
 *   it is set to its peer's tag and the T bit is set in the Chunk
 *   Flags.
 *   Otherwise, the receiver MUST silently discard the packet
 *   and take no further action.  An endpoint MUST ignore the
 *   SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_4_C(struct net *net,
				     const struct sctp_endpoint *ep,
				     const struct sctp_association *asoc,
				     const union sctp_subtype type,
				     void *arg, struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_ulpevent *ev;

	/* Accept only if the vtag matches our own tag (T bit clear) or
	 * the peer's tag (T bit set); otherwise silently discard per
	 * 8.5.1(C).
	 */
	if (!sctp_vtag_verify_either(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* RFC 2960 6.10 Bundling
	 *
	 * An endpoint MUST NOT bundle INIT, INIT ACK or
	 * SHUTDOWN COMPLETE with any other chunks.
	 */
	if (!chunk->singleton)
		return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);

	/* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* RFC 2960 10.2 SCTP-to-ULP
	 *
	 * H) SHUTDOWN COMPLETE notification
	 *
	 * When SCTP completes the shutdown procedures (section 9.2) this
	 * notification is passed to the upper layer.
	 */
	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
					     0, 0, 0, NULL, GFP_ATOMIC);
	if (ev)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(ev));

	/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
	 * will verify that it is in SHUTDOWN-ACK-SENT state, if it is
	 * not the chunk should be discarded. If the endpoint is in
	 * the SHUTDOWN-ACK-SENT state the endpoint should stop the
	 * T2-shutdown timer and remove all knowledge of the
	 * association (and thus the association enters the CLOSED
	 * state).
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);

	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

	return SCTP_DISPOSITION_DELETE_TCB;
}

/*
 * Respond to a normal INIT chunk.
 * We are the side that is being asked for an association.
 *
 * Section: 5.1 Normal Establishment of an Association, B
 * B) "Z" shall respond immediately with an INIT ACK chunk.  The
 *    destination IP address of the INIT ACK MUST be set to the source
 *    IP address of the INIT to which this INIT ACK is responding.  In
 *    the response, besides filling in other parameters, "Z" must set the
 *    Verification Tag field to Tag_A, and also provide its own
 *    Verification Tag (Tag_Z) in the Initiate Tag field.
 *
 * Verification Tag: Must be 0.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg, *repl, *err_chunk;
	struct sctp_unrecognized_param *unk_param;
	struct sctp_association *new_asoc;
	struct sctp_packet *packet;
	int len;

	/* Update socket peer label if first association.
 */
	if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
					chunk->skb))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* 6.10 Bundling
	 * An endpoint MUST NOT bundle INIT, INIT ACK or
	 * SHUTDOWN COMPLETE with any other chunks.
	 *
	 * IG Section 2.11.2
	 * Furthermore, we require that the receiver of an INIT chunk MUST
	 * enforce these rules by silently discarding an arriving packet
	 * with an INIT chunk that is bundled with other chunks.
	 */
	if (!chunk->singleton)
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the INIT chunk has a valid length.
	 * Normally, this would cause an ABORT with a Protocol Violation
	 * error, but since we don't have an association, we'll
	 * just discard the packet.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* If the packet is an OOTB packet which is temporarily on the
	 * control endpoint, respond with an ABORT.
	 */
	if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
		SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
	}

	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
	 * Tag.
	 */
	if (chunk->sctp_hdr->vtag != 0)
		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);

	/* If the INIT is coming toward a closing socket, we'll send back
	 * an ABORT.  Essentially, this catches the race of INIT being
	 * backlogged to the socket at the same time as the user issues close().
	 * Since the socket and all its associations are going away, we
	 * can treat this OOTB
	 */
	if (sctp_sstate(ep->base.sk, CLOSING))
		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);

	/* Verify the INIT chunk before processing it. */
	err_chunk = NULL;
	if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
			      (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
			      &err_chunk)) {
		/* This chunk contains fatal error. It is to be discarded.
		 * Send an ABORT, with causes if there is any.
		 */
		if (err_chunk) {
			packet = sctp_abort_pkt_new(net, ep, asoc, arg,
					(__u8 *)(err_chunk->chunk_hdr) +
					sizeof(struct sctp_chunkhdr),
					ntohs(err_chunk->chunk_hdr->length) -
					sizeof(struct sctp_chunkhdr));

			sctp_chunk_free(err_chunk);

			if (packet) {
				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
						SCTP_PACKET(packet));
				SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
				return SCTP_DISPOSITION_CONSUME;
			} else {
				return SCTP_DISPOSITION_NOMEM;
			}
		} else {
			return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
						    commands);
		}
	}

	/* Grab the INIT header.  */
	chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;

	/* Tag the variable length parameters.  */
	chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));

	new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
	if (!new_asoc)
		goto nomem;

	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
					     sctp_scope(sctp_source(chunk)),
					     GFP_ATOMIC) < 0)
		goto nomem_init;

	/* The call, sctp_process_init(), can fail on memory allocation.  */
	if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
			       (struct sctp_init_chunk *)chunk->chunk_hdr,
			       GFP_ATOMIC))
		goto nomem_init;

	/* B) "Z" shall respond immediately with an INIT ACK chunk.  */

	/* If there are errors need to be reported for unknown parameters,
	 * make sure to reserve enough room in the INIT ACK for them.
	 */
	len = 0;
	if (err_chunk)
		len = ntohs(err_chunk->chunk_hdr->length) -
		      sizeof(struct sctp_chunkhdr);

	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
	if (!repl)
		goto nomem_init;

	/* If there are errors need to be reported for unknown parameters,
	 * include them in the outgoing INIT ACK as "Unrecognized parameter"
	 * parameter.
	 */
	if (err_chunk) {
		/* Get the "Unrecognized parameter" parameter(s) out of the
		 * ERROR chunk generated by sctp_verify_init(). Since the
		 * error cause code for "unknown parameter" and the
		 * "Unrecognized parameter" type is the same, we can
		 * construct the parameters in INIT ACK by copying the
		 * ERROR causes over.
		 */
		unk_param = (struct sctp_unrecognized_param *)
			    ((__u8 *)(err_chunk->chunk_hdr) +
			    sizeof(struct sctp_chunkhdr));
		/* Replace the cause code with the "Unrecognized parameter"
		 * parameter type.
		 */
		sctp_addto_chunk(repl, len, unk_param);
		sctp_chunk_free(err_chunk);
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));

	/*
	 * Note: After sending out INIT ACK with the State Cookie parameter,
	 * "Z" MUST NOT allocate any resources, nor keep any states for the
	 * new association.  Otherwise, "Z" will be vulnerable to resource
	 * attacks.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

	return SCTP_DISPOSITION_DELETE_TCB;

nomem_init:
	sctp_association_free(new_asoc);
nomem:
	if (err_chunk)
		sctp_chunk_free(err_chunk);
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Respond to a normal INIT ACK chunk.
 * We are the side that is initiating the association.
 *
 * Section: 5.1 Normal Establishment of an Association, C
 * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
 *    timer and leave COOKIE-WAIT state.
"A" shall then send the State 494 * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start 495 * the T1-cookie timer, and enter the COOKIE-ECHOED state. 496 * 497 * Note: The COOKIE ECHO chunk can be bundled with any pending outbound 498 * DATA chunks, but it MUST be the first chunk in the packet and 499 * until the COOKIE ACK is returned the sender MUST NOT send any 500 * other packets to the peer. 501 * 502 * Verification Tag: 3.3.3 503 * If the value of the Initiate Tag in a received INIT ACK chunk is 504 * found to be 0, the receiver MUST treat it as an error and close the 505 * association by transmitting an ABORT. 506 * 507 * Inputs 508 * (endpoint, asoc, chunk) 509 * 510 * Outputs 511 * (asoc, reply_msg, msg_up, timers, counters) 512 * 513 * The return value is the disposition of the chunk. 514 */ 515 enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, 516 const struct sctp_endpoint *ep, 517 const struct sctp_association *asoc, 518 const union sctp_subtype type, 519 void *arg, 520 struct sctp_cmd_seq *commands) 521 { 522 struct sctp_init_chunk *initchunk; 523 struct sctp_chunk *chunk = arg; 524 struct sctp_chunk *err_chunk; 525 struct sctp_packet *packet; 526 527 if (!sctp_vtag_verify(chunk, asoc)) 528 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 529 530 /* 6.10 Bundling 531 * An endpoint MUST NOT bundle INIT, INIT ACK or 532 * SHUTDOWN COMPLETE with any other chunks. 533 */ 534 if (!chunk->singleton) 535 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); 536 537 /* Make sure that the INIT-ACK chunk has a valid length */ 538 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_initack_chunk))) 539 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 540 commands); 541 /* Grab the INIT header. */ 542 chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data; 543 544 /* Verify the INIT chunk before processing it. 
*/ 545 err_chunk = NULL; 546 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, 547 (struct sctp_init_chunk *)chunk->chunk_hdr, chunk, 548 &err_chunk)) { 549 550 enum sctp_error error = SCTP_ERROR_NO_RESOURCE; 551 552 /* This chunk contains fatal error. It is to be discarded. 553 * Send an ABORT, with causes. If there are no causes, 554 * then there wasn't enough memory. Just terminate 555 * the association. 556 */ 557 if (err_chunk) { 558 packet = sctp_abort_pkt_new(net, ep, asoc, arg, 559 (__u8 *)(err_chunk->chunk_hdr) + 560 sizeof(struct sctp_chunkhdr), 561 ntohs(err_chunk->chunk_hdr->length) - 562 sizeof(struct sctp_chunkhdr)); 563 564 sctp_chunk_free(err_chunk); 565 566 if (packet) { 567 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 568 SCTP_PACKET(packet)); 569 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); 570 error = SCTP_ERROR_INV_PARAM; 571 } 572 } 573 574 /* SCTP-AUTH, Section 6.3: 575 * It should be noted that if the receiver wants to tear 576 * down an association in an authenticated way only, the 577 * handling of malformed packets should not result in 578 * tearing down the association. 579 * 580 * This means that if we only want to abort associations 581 * in an authenticated way (i.e AUTH+ABORT), then we 582 * can't destroy this association just because the packet 583 * was malformed. 584 */ 585 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 586 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 587 588 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); 589 return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, 590 asoc, chunk->transport); 591 } 592 593 /* Tag the variable length parameters. Note that we never 594 * convert the parameters in an INIT chunk. 
595 */ 596 chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr)); 597 598 initchunk = (struct sctp_init_chunk *)chunk->chunk_hdr; 599 600 sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT, 601 SCTP_PEER_INIT(initchunk)); 602 603 /* Reset init error count upon receipt of INIT-ACK. */ 604 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); 605 606 /* 5.1 C) "A" shall stop the T1-init timer and leave 607 * COOKIE-WAIT state. "A" shall then ... start the T1-cookie 608 * timer, and enter the COOKIE-ECHOED state. 609 */ 610 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 611 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 612 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 613 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); 614 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 615 SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); 616 617 /* SCTP-AUTH: generate the association shared keys so that 618 * we can potentially sign the COOKIE-ECHO. 619 */ 620 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL()); 621 622 /* 5.1 C) "A" shall then send the State Cookie received in the 623 * INIT ACK chunk in a COOKIE ECHO chunk, ... 624 */ 625 /* If there is any errors to report, send the ERROR chunk generated 626 * for unknown parameters as well. 627 */ 628 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO, 629 SCTP_CHUNK(err_chunk)); 630 631 return SCTP_DISPOSITION_CONSUME; 632 } 633 634 static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk, 635 const struct sctp_association *asoc) 636 { 637 struct sctp_chunk auth; 638 639 if (!chunk->auth_chunk) 640 return true; 641 642 /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo 643 * is supposed to be authenticated and we have to do delayed 644 * authentication. We've just recreated the association using 645 * the information in the cookie and now it's much easier to 646 * do the authentication. 
647 */ 648 649 /* Make sure that we and the peer are AUTH capable */ 650 if (!net->sctp.auth_enable || !asoc->peer.auth_capable) 651 return false; 652 653 /* set-up our fake chunk so that we can process it */ 654 auth.skb = chunk->auth_chunk; 655 auth.asoc = chunk->asoc; 656 auth.sctp_hdr = chunk->sctp_hdr; 657 auth.chunk_hdr = (struct sctp_chunkhdr *) 658 skb_push(chunk->auth_chunk, 659 sizeof(struct sctp_chunkhdr)); 660 skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); 661 auth.transport = chunk->transport; 662 663 return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR; 664 } 665 666 /* 667 * Respond to a normal COOKIE ECHO chunk. 668 * We are the side that is being asked for an association. 669 * 670 * Section: 5.1 Normal Establishment of an Association, D 671 * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply 672 * with a COOKIE ACK chunk after building a TCB and moving to 673 * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with 674 * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK 675 * chunk MUST be the first chunk in the packet. 676 * 677 * IMPLEMENTATION NOTE: An implementation may choose to send the 678 * Communication Up notification to the SCTP user upon reception 679 * of a valid COOKIE ECHO chunk. 680 * 681 * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules 682 * D) Rules for packet carrying a COOKIE ECHO 683 * 684 * - When sending a COOKIE ECHO, the endpoint MUST use the value of the 685 * Initial Tag received in the INIT ACK. 686 * 687 * - The receiver of a COOKIE ECHO follows the procedures in Section 5. 688 * 689 * Inputs 690 * (endpoint, asoc, chunk) 691 * 692 * Outputs 693 * (asoc, reply_msg, msg_up, timers, counters) 694 * 695 * The return value is the disposition of the chunk. 
696 */ 697 enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, 698 const struct sctp_endpoint *ep, 699 const struct sctp_association *asoc, 700 const union sctp_subtype type, 701 void *arg, 702 struct sctp_cmd_seq *commands) 703 { 704 struct sctp_ulpevent *ev, *ai_ev = NULL, *auth_ev = NULL; 705 struct sctp_association *new_asoc; 706 struct sctp_init_chunk *peer_init; 707 struct sctp_chunk *chunk = arg; 708 struct sctp_chunk *err_chk_p; 709 struct sctp_chunk *repl; 710 struct sock *sk; 711 int error = 0; 712 713 if (asoc && !sctp_vtag_verify(chunk, asoc)) 714 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 715 716 /* If the packet is an OOTB packet which is temporarily on the 717 * control endpoint, respond with an ABORT. 718 */ 719 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { 720 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); 721 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 722 } 723 724 /* Make sure that the COOKIE_ECHO chunk has a valid length. 725 * In this case, we check that we have enough for at least a 726 * chunk header. More detailed verification is done 727 * in sctp_unpack_cookie(). 728 */ 729 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) 730 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 731 commands); 732 733 /* If the endpoint is not listening or if the number of associations 734 * on the TCP-style socket exceed the max backlog, respond with an 735 * ABORT. 736 */ 737 sk = ep->base.sk; 738 if (!sctp_sstate(sk, LISTENING) || 739 (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) 740 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 741 742 /* "Decode" the chunk. We have no optional parameters so we 743 * are in good shape. 
744 */ 745 chunk->subh.cookie_hdr = 746 (struct sctp_signed_cookie *)chunk->skb->data; 747 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - 748 sizeof(struct sctp_chunkhdr))) 749 goto nomem; 750 751 /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint 752 * "Z" will reply with a COOKIE ACK chunk after building a TCB 753 * and moving to the ESTABLISHED state. 754 */ 755 new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, 756 &err_chk_p); 757 758 /* FIXME: 759 * If the re-build failed, what is the proper error path 760 * from here? 761 * 762 * [We should abort the association. --piggy] 763 */ 764 if (!new_asoc) { 765 /* FIXME: Several errors are possible. A bad cookie should 766 * be silently discarded, but think about logging it too. 767 */ 768 switch (error) { 769 case -SCTP_IERROR_NOMEM: 770 goto nomem; 771 772 case -SCTP_IERROR_STALE_COOKIE: 773 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, 774 err_chk_p); 775 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 776 777 case -SCTP_IERROR_BAD_SIG: 778 default: 779 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 780 } 781 } 782 783 784 /* Delay state machine commands until later. 785 * 786 * Re-build the bind address for the association is done in 787 * the sctp_unpack_cookie() already. 788 */ 789 /* This is a brand-new association, so these are not yet side 790 * effects--it is safe to run them here. 
791 */ 792 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 793 794 if (!sctp_process_init(new_asoc, chunk, 795 &chunk->subh.cookie_hdr->c.peer_addr, 796 peer_init, GFP_ATOMIC)) 797 goto nomem_init; 798 799 /* SCTP-AUTH: Now that we've populate required fields in 800 * sctp_process_init, set up the association shared keys as 801 * necessary so that we can potentially authenticate the ACK 802 */ 803 error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC); 804 if (error) 805 goto nomem_init; 806 807 if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) { 808 sctp_association_free(new_asoc); 809 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 810 } 811 812 repl = sctp_make_cookie_ack(new_asoc, chunk); 813 if (!repl) 814 goto nomem_init; 815 816 /* RFC 2960 5.1 Normal Establishment of an Association 817 * 818 * D) IMPLEMENTATION NOTE: An implementation may choose to 819 * send the Communication Up notification to the SCTP user 820 * upon reception of a valid COOKIE ECHO chunk. 821 */ 822 ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0, 823 new_asoc->c.sinit_num_ostreams, 824 new_asoc->c.sinit_max_instreams, 825 NULL, GFP_ATOMIC); 826 if (!ev) 827 goto nomem_ev; 828 829 /* Sockets API Draft Section 5.3.1.6 830 * When a peer sends a Adaptation Layer Indication parameter , SCTP 831 * delivers this notification to inform the application that of the 832 * peers requested adaptation layer. 833 */ 834 if (new_asoc->peer.adaptation_ind) { 835 ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc, 836 GFP_ATOMIC); 837 if (!ai_ev) 838 goto nomem_aiev; 839 } 840 841 if (!new_asoc->peer.auth_capable) { 842 auth_ev = sctp_ulpevent_make_authkey(new_asoc, 0, 843 SCTP_AUTH_NO_AUTH, 844 GFP_ATOMIC); 845 if (!auth_ev) 846 goto nomem_authev; 847 } 848 849 /* Add all the state machine commands now since we've created 850 * everything. 
This way we don't introduce memory corruptions 851 * during side-effect processing and correctly count established 852 * associations. 853 */ 854 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 855 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 856 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 857 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); 858 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); 859 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 860 861 if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) 862 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 863 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 864 865 /* This will send the COOKIE ACK */ 866 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 867 868 /* Queue the ASSOC_CHANGE event */ 869 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 870 871 /* Send up the Adaptation Layer Indication event */ 872 if (ai_ev) 873 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 874 SCTP_ULPEVENT(ai_ev)); 875 876 if (auth_ev) 877 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 878 SCTP_ULPEVENT(auth_ev)); 879 880 return SCTP_DISPOSITION_CONSUME; 881 882 nomem_authev: 883 sctp_ulpevent_free(ai_ev); 884 nomem_aiev: 885 sctp_ulpevent_free(ev); 886 nomem_ev: 887 sctp_chunk_free(repl); 888 nomem_init: 889 sctp_association_free(new_asoc); 890 nomem: 891 return SCTP_DISPOSITION_NOMEM; 892 } 893 894 /* 895 * Respond to a normal COOKIE ACK chunk. 896 * We are the side that is asking for an association. 897 * 898 * RFC 2960 5.1 Normal Establishment of an Association 899 * 900 * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the 901 * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie 902 * timer. It may also notify its ULP about the successful 903 * establishment of the association with a Communication Up 904 * notification (see Section 10). 
905 * 906 * Verification Tag: 907 * Inputs 908 * (endpoint, asoc, chunk) 909 * 910 * Outputs 911 * (asoc, reply_msg, msg_up, timers, counters) 912 * 913 * The return value is the disposition of the chunk. 914 */ 915 enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net, 916 const struct sctp_endpoint *ep, 917 const struct sctp_association *asoc, 918 const union sctp_subtype type, 919 void *arg, 920 struct sctp_cmd_seq *commands) 921 { 922 struct sctp_chunk *chunk = arg; 923 struct sctp_ulpevent *ev; 924 925 if (!sctp_vtag_verify(chunk, asoc)) 926 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 927 928 /* Verify that the chunk length for the COOKIE-ACK is OK. 929 * If we don't do this, any bundled chunks may be junked. 930 */ 931 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) 932 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 933 commands); 934 935 /* Reset init error count upon receipt of COOKIE-ACK, 936 * to avoid problems with the management of this 937 * counter in stale cookie situations when a transition back 938 * from the COOKIE-ECHOED state to the COOKIE-WAIT 939 * state is performed. 940 */ 941 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); 942 943 /* Set peer label for connection. */ 944 security_inet_conn_established(ep->base.sk, chunk->skb); 945 946 /* RFC 2960 5.1 Normal Establishment of an Association 947 * 948 * E) Upon reception of the COOKIE ACK, endpoint "A" will move 949 * from the COOKIE-ECHOED state to the ESTABLISHED state, 950 * stopping the T1-cookie timer. 
951 */ 952 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 953 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); 954 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 955 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 956 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); 957 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); 958 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 959 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) 960 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 961 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 962 963 /* It may also notify its ULP about the successful 964 * establishment of the association with a Communication Up 965 * notification (see Section 10). 966 */ 967 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 968 0, asoc->c.sinit_num_ostreams, 969 asoc->c.sinit_max_instreams, 970 NULL, GFP_ATOMIC); 971 972 if (!ev) 973 goto nomem; 974 975 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 976 977 /* Sockets API Draft Section 5.3.1.6 978 * When a peer sends a Adaptation Layer Indication parameter , SCTP 979 * delivers this notification to inform the application that of the 980 * peers requested adaptation layer. 981 */ 982 if (asoc->peer.adaptation_ind) { 983 ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); 984 if (!ev) 985 goto nomem; 986 987 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 988 SCTP_ULPEVENT(ev)); 989 } 990 991 if (!asoc->peer.auth_capable) { 992 ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, 993 GFP_ATOMIC); 994 if (!ev) 995 goto nomem; 996 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 997 SCTP_ULPEVENT(ev)); 998 } 999 1000 return SCTP_DISPOSITION_CONSUME; 1001 nomem: 1002 return SCTP_DISPOSITION_NOMEM; 1003 } 1004 1005 /* Generate and sendout a heartbeat packet. 
 */
static enum sctp_disposition sctp_sf_heartbeat(
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_transport *transport = (struct sctp_transport *) arg;
	struct sctp_chunk *reply;

	/* Send a heartbeat to our peer.  */
	reply = sctp_make_heartbeat(asoc, transport, 0);
	if (!reply)
		return SCTP_DISPOSITION_NOMEM;

	/* Set rto_pending indicating that an RTT measurement
	 * is started with this heartbeat chunk.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
			SCTP_TRANSPORT(transport));

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	return SCTP_DISPOSITION_CONSUME;
}

/* Generate a HEARTBEAT packet on the given transport. */
enum sctp_disposition sctp_sf_sendbeat_8_3(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_transport *transport = (struct sctp_transport *) arg;

	/* Too many consecutive errors: the association is dead. */
	if (asoc->overall_error_count >= asoc->max_retrans) {
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	/* Section 3.3.5.
	 * The Sender-specific Heartbeat Info field should normally include
	 * information about the sender's current time when this HEARTBEAT
	 * chunk is sent and the destination transport address to which this
	 * HEARTBEAT is sent (see Section 8.3).
	 */

	if (transport->param_flags & SPP_HB_ENABLE) {
		if (SCTP_DISPOSITION_NOMEM ==
				sctp_sf_heartbeat(ep, asoc, type, arg,
						  commands))
			return SCTP_DISPOSITION_NOMEM;

		/* Set transport error counter and association error counter
		 * when sending heartbeat.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
				SCTP_TRANSPORT(transport));
	}
	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
			SCTP_TRANSPORT(transport));
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
			SCTP_TRANSPORT(transport));

	return SCTP_DISPOSITION_CONSUME;
}

/* resend asoc strreset_chunk.  */
enum sctp_disposition sctp_sf_send_reconf(struct net *net,
					  const struct sctp_endpoint *ep,
					  const struct sctp_association *asoc,
					  const union sctp_subtype type,
					  void *arg,
					  struct sctp_cmd_seq *commands)
{
	struct sctp_transport *transport = arg;

	/* Same give-up threshold as HEARTBEAT retransmission above. */
	if (asoc->overall_error_count >= asoc->max_retrans) {
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	/* Take an extra ref: SCTP_CMD_REPLY consumes one reference. */
	sctp_chunk_hold(asoc->strreset_chunk);
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
			SCTP_CHUNK(asoc->strreset_chunk));
	sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));

	return SCTP_DISPOSITION_CONSUME;
}

/* send hb chunk with padding for PLPMUTD.
 */
enum sctp_disposition sctp_sf_send_probe(struct net *net,
					 const struct sctp_endpoint *ep,
					 const struct sctp_association *asoc,
					 const union sctp_subtype type,
					 void *arg,
					 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *transport = (struct sctp_transport *)arg;
	struct sctp_chunk *reply;

	/* Nothing to do if PLPMTUD probing is not active on this path. */
	if (!sctp_transport_pl_enabled(transport))
		return SCTP_DISPOSITION_CONSUME;

	if (sctp_transport_pl_send(transport)) {
		/* Probe with a HEARTBEAT padded up to pl.probe_size. */
		reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
		if (!reply)
			return SCTP_DISPOSITION_NOMEM;
		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	}
	sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
			SCTP_TRANSPORT(transport));

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Process an heartbeat request.
 *
 * Section: 8.3 Path Heartbeat
 * The receiver of the HEARTBEAT should immediately respond with a
 * HEARTBEAT ACK that contains the Heartbeat Information field copied
 * from the received HEARTBEAT chunk.
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 * When receiving an SCTP packet, the endpoint MUST ensure that the
 * value in the Verification Tag field of the received SCTP packet
 * matches its own Tag. If the received Verification Tag value does not
 * match the receiver's own tag value, the receiver shall silently
 * discard the packet and shall not process it any further except for
 * those cases listed in Section 8.5.1 below.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_beat_8_3(struct net *net,
				       const struct sctp_endpoint *ep,
				       const struct sctp_association *asoc,
				       const union sctp_subtype type,
				       void *arg, struct sctp_cmd_seq *commands)
{
	struct sctp_paramhdr *param_hdr;
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *reply;
	size_t paylen = 0;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the HEARTBEAT chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk,
				     sizeof(struct sctp_heartbeat_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* 8.3 The receiver of the HEARTBEAT should immediately
	 * respond with a HEARTBEAT ACK that contains the Heartbeat
	 * Information field copied from the received HEARTBEAT chunk.
	 */
	chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data;
	param_hdr = (struct sctp_paramhdr *)chunk->subh.hb_hdr;
	paylen = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_chunkhdr);

	/* The embedded parameter must not claim more data than the chunk
	 * actually carries; otherwise it is a protocol violation.
	 */
	if (ntohs(param_hdr->length) > paylen)
		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
						  param_hdr, commands);

	if (!pskb_pull(chunk->skb, paylen))
		goto nomem;

	reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
	if (!reply)
		goto nomem;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Process the returning HEARTBEAT ACK.
 *
 * Section: 8.3 Path Heartbeat
 * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
 * should clear the error counter of the destination transport
 * address to which the HEARTBEAT was sent, and mark the destination
 * transport address as active if it is not so marked.
 * The endpoint may
 * optionally report to the upper layer when an inactive destination
 * address is marked as active due to the reception of the latest
 * HEARTBEAT ACK.  The receiver of the HEARTBEAT ACK must also
 * clear the association overall error count as well (as defined
 * in section 8.1).
 *
 * The receiver of the HEARTBEAT ACK should also perform an RTT
 * measurement for that destination transport address using the time
 * value carried in the HEARTBEAT ACK chunk.
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_sender_hb_info *hbinfo;
	struct sctp_chunk *chunk = arg;
	struct sctp_transport *link;
	unsigned long max_interval;
	union sctp_addr from_addr;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr) +
				     sizeof(*hbinfo)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	/* Make sure that the length of the parameter is what we expect */
	if (ntohs(hbinfo->param_hdr.length) != sizeof(*hbinfo))
		return SCTP_DISPOSITION_DISCARD;

	/* The ACK echoes back our own hbinfo; recover the transport
	 * (path) the original HEARTBEAT was sent on from it.
	 */
	from_addr = hbinfo->daddr;
	link = sctp_assoc_lookup_paddr(asoc, &from_addr);

	/* This should never happen, but lets log it if so.  */
	if (unlikely(!link)) {
		if (from_addr.sa.sa_family == AF_INET6) {
			net_warn_ratelimited("%s association %p could not find address %pI6\n",
					     __func__,
					     asoc,
					     &from_addr.v6.sin6_addr);
		} else {
			net_warn_ratelimited("%s association %p could not find address %pI4\n",
					     __func__,
					     asoc,
					     &from_addr.v4.sin_addr.s_addr);
		}
		return SCTP_DISPOSITION_DISCARD;
	}

	/* Validate the 64-bit random nonce. */
	if (hbinfo->hb_nonce != link->hb_nonce)
		return SCTP_DISPOSITION_DISCARD;

	/* A non-zero probe_size marks this as a PLPMTUD probe reply. */
	if (hbinfo->probe_size) {
		if (hbinfo->probe_size != link->pl.probe_size ||
		    !sctp_transport_pl_enabled(link))
			return SCTP_DISPOSITION_DISCARD;

		if (sctp_transport_pl_recv(link))
			return SCTP_DISPOSITION_CONSUME;

		return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
	}

	max_interval = link->hbinterval + link->rto;

	/* Check if the timestamp looks valid.  */
	if (time_after(hbinfo->sent_at, jiffies) ||
	    time_after(jiffies, hbinfo->sent_at + max_interval)) {
		pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
			 "for transport:%p\n", __func__, link);

		return SCTP_DISPOSITION_DISCARD;
	}

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
	 * the HEARTBEAT should clear the error counter of the
	 * destination transport address to which the HEARTBEAT was
	 * sent and mark the destination transport address as active if
	 * it is not so marked.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));

	return SCTP_DISPOSITION_CONSUME;
}

/* Helper function to send out an abort for the restart
 * condition.
 */
static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
				      struct sctp_chunk *init,
				      struct sctp_cmd_seq *commands)
{
	struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
	union sctp_addr_param *addrparm;
	struct sctp_errhdr *errhdr;
	char buffer[sizeof(*errhdr) + sizeof(*addrparm)];
	struct sctp_endpoint *ep;
	struct sctp_packet *pkt;
	int len;

	/* Build the error on the stack.   We are way to malloc crazy
	 * throughout the code today.
	 */
	errhdr = (struct sctp_errhdr *)buffer;
	addrparm = (union sctp_addr_param *)errhdr->variable;

	/* Copy into a parm format. */
	len = af->to_addr_param(ssa, addrparm);
	len += sizeof(*errhdr);

	errhdr->cause = SCTP_ERROR_RESTART;
	errhdr->length = htons(len);

	/* Assign to the control socket. */
	ep = sctp_sk(net->sctp.ctl_sock)->ep;

	/* Association is NULL since this may be a restart attack and we
	 * want to send back the attacker's vtag.
	 */
	pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);

	if (!pkt)
		goto out;
	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));

	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

	/* Discard the rest of the inbound packet. */
	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());

out:
	/* Even if there is no memory, treat as a failure so
	 * the packet will get dropped.  (I.e. this function
	 * unconditionally returns 0.)
	 */
	return 0;
}

/* Return true if LIST contains a transport whose address exactly
 * matches IPADDR.
 */
static bool list_has_sctp_addr(const struct list_head *list,
			       union sctp_addr *ipaddr)
{
	struct sctp_transport *addr;

	list_for_each_entry(addr, list, transports) {
		if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
			return true;
	}

	return false;
}
/* A restart is occurring, check to make sure no new addresses
 * are being added as we may be under a takeover attack.
 */
static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
				       const struct sctp_association *asoc,
				       struct sctp_chunk *init,
				       struct sctp_cmd_seq *commands)
{
	struct net *net = new_asoc->base.net;
	struct sctp_transport *new_addr;
	int ret = 1;

	/* Implementor's Guide - Section 5.2.2
	 * ...
	 * Before responding the endpoint MUST check to see if the
	 * unexpected INIT adds new addresses to the association. If new
	 * addresses are added to the association, the endpoint MUST respond
	 * with an ABORT..
	 */

	/* Search through all current addresses and make sure
	 * we aren't adding any new ones.
	 */
	list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
			    transports) {
		if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
					&new_addr->ipaddr)) {
			/* Unknown peer address: reply with an ABORT
			 * carrying a "restart" error cause and stop.
			 */
			sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
						   commands);
			ret = 0;
			break;
		}
	}

	/* Return success (1) if all addresses were found. */
	return ret;
}

/* Populate the verification/tie tags based on overlapping INIT
 * scenario.
 *
 * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
 */
static void sctp_tietags_populate(struct sctp_association *new_asoc,
				  const struct sctp_association *asoc)
{
	switch (asoc->state) {

	/* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */

	case SCTP_STATE_COOKIE_WAIT:
		new_asoc->c.my_vtag = asoc->c.my_vtag;
		new_asoc->c.my_ttag = asoc->c.my_vtag;
		new_asoc->c.peer_ttag = 0;
		break;

	case SCTP_STATE_COOKIE_ECHOED:
		new_asoc->c.my_vtag = asoc->c.my_vtag;
		new_asoc->c.my_ttag = asoc->c.my_vtag;
		new_asoc->c.peer_ttag = asoc->c.peer_vtag;
		break;

	/* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
	 * COOKIE-WAIT and SHUTDOWN-ACK-SENT
	 */
	default:
		new_asoc->c.my_ttag = asoc->c.my_vtag;
		new_asoc->c.peer_ttag = asoc->c.peer_vtag;
		break;
	}

	/* Other parameters for the endpoint SHOULD be copied from the
	 * existing parameters of the association (e.g. number of
	 * outbound streams) into the INIT ACK and cookie.
	 */
	new_asoc->rwnd = asoc->rwnd;
	new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
	new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
	new_asoc->c.initial_tsn = asoc->c.initial_tsn;
}

/*
 * Compare vtag/tietag values to determine unexpected COOKIE-ECHO
 * handling action.
 *
 * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
 *
 * Returns value representing action to be taken.   These action values
 * correspond to Action/Description values in RFC 2960, Table 2.
 */
static char sctp_tietags_compare(struct sctp_association *new_asoc,
				 const struct sctp_association *asoc)
{
	/* In this case, the peer may have restarted.  */
	if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
	    (asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
	    (asoc->c.my_vtag == new_asoc->c.my_ttag) &&
	    (asoc->c.peer_vtag == new_asoc->c.peer_ttag))
		return 'A';

	/* Collision case B. */
	if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
	    ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
	     (0 == asoc->c.peer_vtag))) {
		return 'B';
	}

	/* Collision case D. */
	if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
	    (asoc->c.peer_vtag == new_asoc->c.peer_vtag))
		return 'D';

	/* Collision case C. */
	if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
	    (asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
	    (0 == new_asoc->c.my_ttag) &&
	    (0 == new_asoc->c.peer_ttag))
		return 'C';

	/* No match to any of the special cases; discard this packet. */
	return 'E';
}

/* Common helper routine for both duplicate and simultaneous INIT
 * chunk handling.
 */
static enum sctp_disposition sctp_sf_do_unexpected_init(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg, *repl, *err_chunk;
	struct sctp_unrecognized_param *unk_param;
	struct sctp_association *new_asoc;
	enum sctp_disposition retval;
	struct sctp_packet *packet;
	int len;

	/* Update socket peer label if first association. */
	if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
					chunk->skb))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* 6.10 Bundling
	 * An endpoint MUST NOT bundle INIT, INIT ACK or
	 * SHUTDOWN COMPLETE with any other chunks.
	 *
	 * IG Section 2.11.2
	 * Furthermore, we require that the receiver of an INIT chunk MUST
	 * enforce these rules by silently discarding an arriving packet
	 * with an INIT chunk that is bundled with other chunks.
	 */
	if (!chunk->singleton)
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the INIT chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
	 * Tag.
	 */
	if (chunk->sctp_hdr->vtag != 0)
		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);

	/* UDP-encapsulation port changed mid-stream; handle separately. */
	if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
		return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);

	/* Grab the INIT header.  */
	chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;

	/* Tag the variable length parameters.  */
	chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));

	/* Verify the INIT chunk before processing it. */
	err_chunk = NULL;
	if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
			      (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
			      &err_chunk)) {
		/* This chunk contains fatal error. It is to be discarded.
		 * Send an ABORT, with causes if there is any.
		 */
		if (err_chunk) {
			packet = sctp_abort_pkt_new(net, ep, asoc, arg,
					(__u8 *)(err_chunk->chunk_hdr) +
					sizeof(struct sctp_chunkhdr),
					ntohs(err_chunk->chunk_hdr->length) -
					sizeof(struct sctp_chunkhdr));

			if (packet) {
				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
						SCTP_PACKET(packet));
				SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
				retval = SCTP_DISPOSITION_CONSUME;
			} else {
				retval = SCTP_DISPOSITION_NOMEM;
			}
			goto cleanup;
		} else {
			return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
						    commands);
		}
	}

	/*
	 * Other parameters for the endpoint SHOULD be copied from the
	 * existing parameters of the association (e.g. number of
	 * outbound streams) into the INIT ACK and cookie.
	 * FIXME:  We are copying parameters from the endpoint not the
	 * association.
	 */
	new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
	if (!new_asoc)
		goto nomem;

	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
				sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
		goto nomem;

	/* In the outbound INIT ACK the endpoint MUST copy its current
	 * Verification Tag and Peers Verification tag into a reserved
	 * place (local tie-tag and per tie-tag) within the state cookie.
	 */
	if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
			       (struct sctp_init_chunk *)chunk->chunk_hdr,
			       GFP_ATOMIC))
		goto nomem;

	/* Make sure no new addresses are being added during the
	 * restart.   Do not do this check for COOKIE-WAIT state,
	 * since there are no peer addresses to check against.
	 * Upon return an ABORT will have been sent if needed.
	 */
	if (!sctp_state(asoc, COOKIE_WAIT)) {
		if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
						 commands)) {
			retval = SCTP_DISPOSITION_CONSUME;
			goto nomem_retval;
		}
	}

	sctp_tietags_populate(new_asoc, asoc);

	/* B) "Z" shall respond immediately with an INIT ACK chunk.  */

	/* If there are errors need to be reported for unknown parameters,
	 * make sure to reserve enough room in the INIT ACK for them.
	 */
	len = 0;
	if (err_chunk) {
		len = ntohs(err_chunk->chunk_hdr->length) -
		      sizeof(struct sctp_chunkhdr);
	}

	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
	if (!repl)
		goto nomem;

	/* If there are errors need to be reported for unknown parameters,
	 * include them in the outgoing INIT ACK as "Unrecognized parameter"
	 * parameter.
	 */
	if (err_chunk) {
		/* Get the "Unrecognized parameter" parameter(s) out of the
		 * ERROR chunk generated by sctp_verify_init(). Since the
		 * error cause code for "unknown parameter" and the
		 * "Unrecognized parameter" type is the same, we can
		 * construct the parameters in INIT ACK by copying the
		 * ERROR causes over.
		 */
		unk_param = (struct sctp_unrecognized_param *)
			    ((__u8 *)(err_chunk->chunk_hdr) +
			     sizeof(struct sctp_chunkhdr));
		/* Replace the cause code with the "Unrecognized parameter"
		 * parameter type.
		 */
		sctp_addto_chunk(repl, len, unk_param);
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));

	/*
	 * Note: After sending out INIT ACK with the State Cookie parameter,
	 * "Z" MUST NOT allocate any resources for this new association.
	 * Otherwise, "Z" will be vulnerable to resource attacks.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
	retval = SCTP_DISPOSITION_CONSUME;

	return retval;

nomem:
	retval = SCTP_DISPOSITION_NOMEM;
nomem_retval:
	if (new_asoc)
		sctp_association_free(new_asoc);
cleanup:
	if (err_chunk)
		sctp_chunk_free(err_chunk);
	return retval;
}

/*
 * Handle simultaneous INIT.
 * This means we started an INIT and then we got an INIT request from
 * our peer.
 *
 * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
 * This usually indicates an initialization collision, i.e., each
 * endpoint is attempting, at about the same time, to establish an
 * association with the other endpoint.
 *
 * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
 * endpoint MUST respond with an INIT ACK using the same parameters it
 * sent in its original INIT chunk (including its Verification Tag,
 * unchanged). These original parameters are combined with those from the
 * newly received INIT chunk. The endpoint shall also generate a State
 * Cookie with the INIT ACK. The endpoint uses the parameters sent in its
 * INIT to calculate the State Cookie.
 *
 * After that, the endpoint MUST NOT change its state, the T1-init
 * timer shall be left running and the corresponding TCB MUST NOT be
 * destroyed. The normal procedures for handling State Cookies when
 * a TCB exists will resolve the duplicate INITs to a single association.
 *
 * For an endpoint that is in the COOKIE-ECHOED state it MUST populate
 * its Tie-Tags with the Tag information of itself and its peer (see
 * section 5.2.2 for a description of the Tie-Tags).
 *
 * Verification Tag: Not explicit, but an INIT can not have a valid
 * verification tag, so we skip the check.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_5_2_1_siminit(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* Call helper to do the real work for both simultaneous and
	 * duplicate INIT chunk handling.
	 */
	return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}

/*
 * Handle duplicated INIT messages.  These are usually delayed
 * retransmissions.
 *
 * Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
 * COOKIE-ECHOED and COOKIE-WAIT
 *
 * Unless otherwise stated, upon reception of an unexpected INIT for
 * this association, the endpoint shall generate an INIT ACK with a
 * State Cookie.  In the outbound INIT ACK the endpoint MUST copy its
 * current Verification Tag and peer's Verification Tag into a reserved
 * place within the state cookie.  We shall refer to these locations as
 * the Peer's-Tie-Tag and the Local-Tie-Tag.  The outbound SCTP packet
 * containing this INIT ACK MUST carry a Verification Tag value equal to
 * the Initiation Tag found in the unexpected INIT.  And the INIT ACK
 * MUST contain a new Initiation Tag (randomly generated see Section
 * 5.3.1).  Other parameters for the endpoint SHOULD be copied from the
 * existing parameters of the association (e.g. number of outbound
 * streams) into the INIT ACK and cookie.
 *
 * After sending out the INIT ACK, the endpoint shall take no further
 * actions, i.e., the existing association, including its current state,
 * and the corresponding TCB MUST NOT be changed.
 *
 * Note: Only when a TCB exists and the association is not in a COOKIE-
 * WAIT state are the Tie-Tags populated.  For a normal association INIT
 * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
 * set to 0 (indicating that no previous TCB existed).  The INIT ACK and
 * State Cookie are populated as specified in section 5.2.1.
 *
 * Verification Tag: Not specified, but an INIT has no way of knowing
 * what the verification tag could be, so we ignore it.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_5_2_2_dupinit(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* Call helper to do the real work for both simultaneous and
	 * duplicate INIT chunk handling.
	 */
	return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}


/*
 * Unexpected INIT-ACK handler.
 *
 * Section 5.2.3
 * If an INIT ACK received by an endpoint in any state other than the
 * COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
 * An unexpected INIT ACK usually indicates the processing of an old or
 * duplicated INIT chunk.
 */
enum sctp_disposition sctp_sf_do_5_2_3_initack(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* Per the above section, we'll discard the chunk if we have an
	 * endpoint.  If this is an OOTB INIT-ACK, treat it as such.
	 */
	if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
		return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
	else
		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
}

/* Merge NEW into ASOC; on failure queue an RSRC_LOW ABORT and tear the
 * association down.  Returns 0 on success, -ENOMEM if the update failed.
 */
static int sctp_sf_do_assoc_update(struct sctp_association *asoc,
				   struct sctp_association *new,
				   struct sctp_cmd_seq *cmds)
{
	struct net *net = asoc->base.net;
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return 0;

	/* Update failed: abort the association, best-effort sending an
	 * ABORT chunk with a "resource low" cause to the peer.
	 */
	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);

	return -ENOMEM;
}

/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
 *
 * Section 5.2.4
 *  A) In this case, the peer may have restarted.
 */
static enum sctp_disposition sctp_sf_do_dupcook_a(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk,
					struct sctp_cmd_seq *commands,
					struct sctp_association *new_asoc)
{
	struct sctp_init_chunk *peer_init;
	enum sctp_disposition disposition;
	struct sctp_ulpevent *ev;
	struct sctp_chunk *repl;
	struct sctp_chunk *err;

	/* new_asoc is a brand-new association, so these are not yet
	 * side effects--it is safe to run them here.
	 */
	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];

	if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
			       GFP_ATOMIC))
		goto nomem;

	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
		goto nomem;

	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
		return SCTP_DISPOSITION_DISCARD;

	/* Make sure no new addresses are being added during the
	 * restart.  Though this is a pretty complicated attack
	 * since you'd have to get inside the cookie.
	 */
	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
		return SCTP_DISPOSITION_CONSUME;

	/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
	 * the peer has restarted (Action A), it MUST NOT setup a new
	 * association but instead resend the SHUTDOWN ACK and send an ERROR
	 * chunk with a "Cookie Received while Shutting Down" error cause to
	 * its peer.
	 */
	if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
		disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
				SCTP_ST_CHUNK(chunk->chunk_hdr->type),
				chunk, commands);
		if (SCTP_DISPOSITION_NOMEM == disposition)
			goto nomem;

		err = sctp_make_op_error(asoc, chunk,
					 SCTP_ERROR_COOKIE_IN_SHUTDOWN,
					 NULL, 0, 0);
		if (err)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(err));

		return SCTP_DISPOSITION_CONSUME;
	}

	/* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
	 * data. Consider the optional choice of resending of this data.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());

	/* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
	 * and ASCONF-ACK cache.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());

	/* Update the content of current association. */
	if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
		goto nomem;

	repl = sctp_make_cookie_ack(asoc, chunk);
	if (!repl)
		goto nomem;

	/* Report association restart to upper layer. */
	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (!ev)
		goto nomem_ev;

	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
	if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
	     sctp_state(asoc, SHUTDOWN_SENT)) &&
	    (sctp_sstate(asoc->base.sk, CLOSING) ||
	     sock_flag(asoc->base.sk, SOCK_DEAD))) {
		/* If the socket has been closed by user, don't
		 * transition to ESTABLISHED. Instead trigger SHUTDOWN
		 * bundled with COOKIE_ACK.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
		return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
						     SCTP_ST_CHUNK(0), repl,
						     commands);
	} else {
		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
				SCTP_STATE(SCTP_STATE_ESTABLISHED));
		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
	}
	return SCTP_DISPOSITION_CONSUME;

nomem_ev:
	sctp_chunk_free(repl);
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
 *
 * Section 5.2.4
 *   B) In this case, both sides may be attempting to start an association
 *      at about the same time but the peer endpoint started its INIT
 *      after responding to the local endpoint's INIT
 */
/* This case represents an initialization collision.
 */
static enum sctp_disposition sctp_sf_do_dupcook_b(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk,
					struct sctp_cmd_seq *commands,
					struct sctp_association *new_asoc)
{
	struct sctp_init_chunk *peer_init;
	struct sctp_chunk *repl;

	/* new_asoc is a brand-new association, so these are not yet
	 * side effects--it is safe to run them here.
	 */
	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
	if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
			       GFP_ATOMIC))
		goto nomem;

	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
		goto nomem;

	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
		return SCTP_DISPOSITION_DISCARD;

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_ESTABLISHED));
	/* Only bump CURRESTAB when we were not already established. */
	if (asoc->state < SCTP_STATE_ESTABLISHED)
		SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());

	/* Update the content of current association. */
	if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
		goto nomem;

	repl = sctp_make_cookie_ack(asoc, chunk);
	if (!repl)
		goto nomem;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * D) IMPLEMENTATION NOTE: An implementation may choose to
	 * send the Communication Up notification to the SCTP user
	 * upon reception of a valid COOKIE ECHO chunk.
	 *
	 * Sadly, this needs to be implemented as a side-effect, because
	 * we are not guaranteed to have set the association id of the real
	 * association and so these notifications need to be delayed until
	 * the association id is allocated.
	 */

	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));

	/* Sockets API Draft Section 5.3.1.6
	 * When a peer sends a Adaptation Layer Indication parameter, SCTP
	 * delivers this notification to inform the application that of the
	 * peers requested adaptation layer.
	 *
	 * This also needs to be done as a side effect for the same reason as
	 * above.
	 */
	if (asoc->peer.adaptation_ind)
		sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());

	if (!asoc->peer.auth_capable)
		sctp_add_cmd_sf(commands, SCTP_CMD_PEER_NO_AUTH, SCTP_NULL());

	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
 *
 * Section 5.2.4
 *  C) In this case, the local endpoint's cookie has arrived late.
 *     Before it arrived, the local endpoint sent an INIT and received an
 *     INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
 *     but a new tag of its own.
 */
/* This case represents an initialization collision.  */
static enum sctp_disposition sctp_sf_do_dupcook_c(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk,
					struct sctp_cmd_seq *commands,
					struct sctp_association *new_asoc)
{
	/* The cookie should be silently discarded.
	 * The endpoint SHOULD NOT change states and should leave
	 * any timers running.
	 */
	return SCTP_DISPOSITION_DISCARD;
}

/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
 *
 * Section 5.2.4
 *
 * D) When both local and remote tags match the endpoint should always
 *    enter the ESTABLISHED state, if it has not already done so.
 */
/* This case represents an initialization collision.  */
static enum sctp_disposition sctp_sf_do_dupcook_d(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk,
					struct sctp_cmd_seq *commands,
					struct sctp_association *new_asoc)
{
	struct sctp_ulpevent *ev = NULL, *ai_ev = NULL, *auth_ev = NULL;
	struct sctp_chunk *repl;

	/* Clarification from Implementor's Guide:
	 * D) When both local and remote tags match the endpoint should
	 * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
	 * It should stop any cookie timer that may be running and send
	 * a COOKIE ACK.
	 */

	/* Verify against the existing asoc here (not new_asoc) since the
	 * tags matched the current association.
	 */
	if (!sctp_auth_chunk_verify(net, chunk, asoc))
		return SCTP_DISPOSITION_DISCARD;

	/* Don't accidentally move back into established state. */
	if (asoc->state < SCTP_STATE_ESTABLISHED) {
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
				SCTP_STATE(SCTP_STATE_ESTABLISHED));
		SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
		sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
				SCTP_NULL());

		/* RFC 2960 5.1 Normal Establishment of an Association
		 *
		 * D) IMPLEMENTATION NOTE: An implementation may choose
		 * to send the Communication Up notification to the
		 * SCTP user upon reception of a valid COOKIE
		 * ECHO chunk.
		 */
		ev = sctp_ulpevent_make_assoc_change(asoc, 0,
						     SCTP_COMM_UP, 0,
						     asoc->c.sinit_num_ostreams,
						     asoc->c.sinit_max_instreams,
						     NULL, GFP_ATOMIC);
		if (!ev)
			goto nomem;

		/* Sockets API Draft Section 5.3.1.6
		 * When a peer sends a Adaptation Layer Indication parameter,
		 * SCTP delivers this notification to inform the application
		 * that of the peers requested adaptation layer.
		 */
		if (asoc->peer.adaptation_ind) {
			ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
								 GFP_ATOMIC);
			if (!ai_ev)
				goto nomem;

		}

		if (!asoc->peer.auth_capable) {
			auth_ev = sctp_ulpevent_make_authkey(asoc, 0,
							     SCTP_AUTH_NO_AUTH,
							     GFP_ATOMIC);
			if (!auth_ev)
				goto nomem;
		}
	}

	repl = sctp_make_cookie_ack(asoc, chunk);
	if (!repl)
		goto nomem;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));

	/* Events are queued only after the COOKIE ACK reply succeeds, so
	 * the nomem path below can free them all unconditionally.
	 */
	if (ev)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(ev));
	if (ai_ev)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
					SCTP_ULPEVENT(ai_ev));
	if (auth_ev)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(auth_ev));

	return SCTP_DISPOSITION_CONSUME;

nomem:
	if (auth_ev)
		sctp_ulpevent_free(auth_ev);
	if (ai_ev)
		sctp_ulpevent_free(ai_ev);
	if (ev)
		sctp_ulpevent_free(ev);
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Handle a duplicate COOKIE-ECHO.  This usually means a cookie-carrying
 * chunk was retransmitted and then delayed in the network.
 *
 * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
 *
 * Verification Tag: None.  Do cookie validation.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_association *new_asoc;
	struct sctp_chunk *chunk = arg;
	enum sctp_disposition retval;
	struct sctp_chunk *err_chk_p;
	int error = 0;
	char action;

	/* Make sure that the chunk has a valid length from the protocol
	 * perspective.  In this case check to make sure we have at least
	 * enough for the chunk header.  Cookie length verification is
	 * done later.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
		/* Only report the violation on the association if the vtag
		 * actually matches; otherwise respond as if no TCB existed.
		 */
		if (!sctp_vtag_verify(chunk, asoc))
			asoc = NULL;
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
	}

	/* "Decode" the chunk.  We have no optional parameters so we
	 * are in good shape.
	 */
	chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
	if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
					sizeof(struct sctp_chunkhdr)))
		goto nomem;

	/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
	 * of a duplicate COOKIE ECHO match the Verification Tags of the
	 * current association, consider the State Cookie valid even if
	 * the lifespan is exceeded.
	 */
	new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
				      &err_chk_p);

	/* FIXME:
	 * If the re-build failed, what is the proper error path
	 * from here?
	 *
	 * [We should abort the association. --piggy]
	 */
	if (!new_asoc) {
		/* FIXME: Several errors are possible.  A bad cookie should
		 * be silently discarded, but think about logging it too.
		 */
		switch (error) {
		case -SCTP_IERROR_NOMEM:
			goto nomem;

		case -SCTP_IERROR_STALE_COOKIE:
			sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
						   err_chk_p);
			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
		case -SCTP_IERROR_BAD_SIG:
		default:
			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
		}
	}

	/* Update socket peer label if first association. */
	if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
					chunk->skb)) {
		sctp_association_free(new_asoc);
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	/* Set temp so that it won't be added into hashtable */
	new_asoc->temp = 1;

	/* Compare the tie_tag in cookie with the verification tag of
	 * current association.
	 */
	action = sctp_tietags_compare(new_asoc, asoc);

	switch (action) {
	case 'A': /* Association restart. */
		retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
					      new_asoc);
		break;

	case 'B': /* Collision case B. */
		retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
					      new_asoc);
		break;

	case 'C': /* Collision case C. */
		retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
					      new_asoc);
		break;

	case 'D': /* Collision case D. */
		retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
					      new_asoc);
		break;

	default: /* Discard packet for all others. */
		retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
		break;
	}

	/* Delete the temporary new association. */
	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

	/* Restore association pointer to provide SCTP command interpreter
	 * with a valid context in case it needs to manipulate
	 * the queues */
	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
			 SCTP_ASOC((struct sctp_association *)asoc));

	return retval;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Process an ABORT.  (SHUTDOWN-PENDING state)
 *
 * See sctp_sf_do_9_1_abort().
 */
enum sctp_disposition sctp_sf_shutdown_pending_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	if (!sctp_vtag_verify_either(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the ABORT chunk has a valid length.
	 * Since this is an ABORT chunk, we have to discard it
	 * because of the following text:
	 * RFC 2960, Section 3.3.7
	 *    If an endpoint receives an ABORT with a format error or for an
	 *    association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't really discard just
	 * as we do not know its true length.  So, to be safe, discard the
	 * packet.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* ADD-IP: Special case for ABORT chunks
	 * F4)  One special consideration is that ABORT Chunks arriving
	 * destined to the IP address being deleted MUST be
	 * ignored (see Section 5.3.1 for further details).
	 */
	if (SCTP_ADDR_DEL ==
		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	if (!sctp_err_chunk_valid(chunk))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}

/*
 * Process an ABORT.  (SHUTDOWN-SENT state)
 *
 * See sctp_sf_do_9_1_abort().
 */
enum sctp_disposition sctp_sf_shutdown_sent_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	if (!sctp_vtag_verify_either(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the ABORT chunk has a valid length.
	 * Since this is an ABORT chunk, we have to discard it
	 * because of the following text:
	 * RFC 2960, Section 3.3.7
	 *    If an endpoint receives an ABORT with a format error or for an
	 *    association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't really discard just
	 * as we do not know its true length.  So, to be safe, discard the
	 * packet.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* ADD-IP: Special case for ABORT chunks
	 * F4)  One special consideration is that ABORT Chunks arriving
	 * destined to the IP address being deleted MUST be
	 * ignored (see Section 5.3.1 for further details).
	 */
	if (SCTP_ADDR_DEL ==
		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	if (!sctp_err_chunk_valid(chunk))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Stop the T2-shutdown timer. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	/* Stop the T5-shutdown guard timer. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}

/*
 * Process an ABORT.  (SHUTDOWN-ACK-SENT state)
 *
 * See sctp_sf_do_9_1_abort().
 */
enum sctp_disposition sctp_sf_shutdown_ack_sent_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* The same T2 timer, so we should be able to use
	 * common function with the SHUTDOWN-SENT state.
	 */
	return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
}

/*
 * Handle an Error received in COOKIE_ECHOED state.
 *
 * Only handle the error type of stale COOKIE Error, the other errors will
 * be ignored.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_cookie_echoed_err(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_errhdr *err;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the ERROR chunk has a valid length.
	 * The parameter walking depends on this as well.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* Process the error here */
	/* FUTURE FIXME: When PR-SCTP related and other optional
	 * parms are emitted, this will have to change to handle multiple
	 * errors.
	 */
	sctp_walk_errors(err, chunk->chunk_hdr) {
		if (SCTP_ERROR_STALE_COOKIE == err->cause)
			return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
						      arg, commands);
	}

	/* It is possible to have malformed error causes, and that
	 * will cause us to end the walk early.  However, since
	 * we are discarding the packet, there should be no adverse
	 * affects.
	 */
	return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}

/*
 * Handle a Stale COOKIE Error
 *
 * Section: 5.2.6 Handle Stale COOKIE Error
 * If the association is in the COOKIE-ECHOED state, the endpoint may elect
 * one of the following three alternatives.
 * ...
 * 3) Send a new INIT chunk to the endpoint, adding a Cookie
 *    Preservative parameter requesting an extension to the lifetime of
 *    the State Cookie.
 *    When calculating the time extension, an
 *    implementation SHOULD use the RTT information measured based on the
 *    previous COOKIE ECHO / ERROR exchange, and should add no more
 *    than 1 second beyond the measured RTT, due to long State Cookie
 *    lifetimes making the endpoint more subject to a replay attack.
 *
 * Verification Tag: Not explicit, but safe to ignore.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
static enum sctp_disposition sctp_sf_do_5_2_6_stale(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	int attempts = asoc->init_err_counter + 1;
	struct sctp_chunk *chunk = arg, *reply;
	struct sctp_cookie_preserve_param bht;
	struct sctp_bind_addr *bp;
	struct sctp_errhdr *err;
	u32 stale;

	/* Give up once the maximum number of INIT attempts is exceeded. */
	if (attempts > asoc->max_init_attempts) {
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
				SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	err = (struct sctp_errhdr *)(chunk->skb->data);

	/* When calculating the time extension, an implementation
	 * SHOULD use the RTT information measured based on the
	 * previous COOKIE ECHO / ERROR exchange, and should add no
	 * more than 1 second beyond the measured RTT, due to long
	 * State Cookie lifetimes making the endpoint more subject to
	 * a replay attack.
	 * Measure of Staleness's unit is usec. (1/1000000 sec)
	 * Suggested Cookie Life-span Increment's unit is msec.
	 * (1/1000 sec)
	 * In general, if you use the suggested cookie life, the value
	 * found in the field of measure of staleness should be doubled
	 * to give ample time to retransmit the new cookie and thus
	 * yield a higher probability of success on the reattempt.
	 */
	/* NOTE(review): this reads 4 bytes past the error cause header and
	 * presumably relies on the caller's sctp_walk_errors() length checks
	 * covering the staleness field -- confirm a cause with length == 4
	 * cannot reach here.
	 */
	stale = ntohl(*(__be32 *)((u8 *)err + sizeof(*err)));
	stale = (stale * 2) / 1000;

	bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
	bht.param_hdr.length = htons(sizeof(bht));
	bht.lifespan_increment = htonl(stale);

	/* Build that new INIT chunk. */
	bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
	reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
	if (!reply)
		goto nomem;

	sctp_addto_chunk(reply, sizeof(bht), &bht);

	/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
	sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());

	/* Stop pending T3-rtx and heartbeat timers */
	sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());

	/* Delete non-primary peer ip addresses since we are transitioning
	 * back to the COOKIE-WAIT state
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());

	/* If we've sent any data bundled with COOKIE-ECHO we will need to
	 * resend
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
			SCTP_TRANSPORT(asoc->peer.primary_path));

	/* Cast away the const modifier, as we want to just
	 * rerun it through as a sideffect.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());

	/* Restart T1 and fall back to the COOKIE-WAIT state before
	 * sending the fresh INIT.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Process an ABORT.
 *
 * Section: 9.1
 * After checking the Verification Tag, the receiving endpoint shall
 * remove the association from its record, and shall report the
 * termination to its upper layer.
 *
 * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
 * B) Rules for packet carrying ABORT:
 *
 *  - The endpoint shall always fill in the Verification Tag field of the
 *    outbound packet with the destination endpoint's tag value if it
 *    is known.
 *
 *  - If the ABORT is sent in response to an OOTB packet, the endpoint
 *    MUST follow the procedure described in Section 8.4.
 *
 *  - The receiver MUST accept the packet if the Verification Tag
 *    matches either its own tag, OR the tag of its peer. Otherwise, the
 *    receiver MUST silently discard the packet and take no further
 *    action.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_9_1_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	if (!sctp_vtag_verify_either(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the ABORT chunk has a valid length.
	 * Since this is an ABORT chunk, we have to discard it
	 * because of the following text:
	 * RFC 2960, Section 3.3.7
	 *    If an endpoint receives an ABORT with a format error or for an
	 *    association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't really discard just
	 * as we do not know its true length.  So, to be safe, discard the
	 * packet.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* ADD-IP: Special case for ABORT chunks
	 * F4)  One special consideration is that ABORT Chunks arriving
	 * destined to the IP address being deleted MUST be
	 * ignored (see Section 5.3.1 for further details).
	 */
	if (SCTP_ADDR_DEL ==
		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	if (!sctp_err_chunk_valid(chunk))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}

/* Common ABORT processing once the chunk has been validated: extract the
 * first error cause (if any), report ECONNRESET on the socket and queue
 * the association-failed teardown (which also deletes the TCB).
 */
static enum sctp_disposition __sctp_sf_do_9_1_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	__be16 error = SCTP_ERROR_NO_ERROR;
	struct sctp_chunk *chunk = arg;
	unsigned int len;

	/* See if we have an error cause code in the chunk. */
	len = ntohs(chunk->chunk_hdr->length);
	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
		error = ((struct sctp_errhdr *)chunk->skb->data)->cause;

	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
	/* ASSOC_FAILED will DELETE_TCB. */
	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);

	return SCTP_DISPOSITION_ABORT;
}

/*
 * Process an ABORT.  (COOKIE-WAIT state)
 *
 * See sctp_sf_do_9_1_abort() above.
 */
enum sctp_disposition sctp_sf_cookie_wait_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	__be16 error = SCTP_ERROR_NO_ERROR;
	struct sctp_chunk *chunk = arg;
	unsigned int len;

	if (!sctp_vtag_verify_either(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the ABORT chunk has a valid length.
	 * Since this is an ABORT chunk, we have to discard it
	 * because of the following text:
	 * RFC 2960, Section 3.3.7
	 *    If an endpoint receives an ABORT with a format error or for an
	 *    association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't really discard just
	 * as we do not know its true length.  So, to be safe, discard the
	 * packet.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* See if we have an error cause code in the chunk. */
	len = ntohs(chunk->chunk_hdr->length);
	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
		error = ((struct sctp_errhdr *)chunk->skb->data)->cause;

	return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
				      chunk->transport);
}

/*
 * Process an incoming ICMP as an ABORT.  (COOKIE-WAIT state)
 */
enum sctp_disposition sctp_sf_cookie_wait_icmp_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* For this event 'arg' is the transport the ICMP arrived on,
	 * not a chunk.
	 */
	return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
				      ENOPROTOOPT, asoc,
				      (struct sctp_transport *)arg);
}

/*
 * Process an ABORT.  (COOKIE-ECHOED state)
 */
enum sctp_disposition sctp_sf_cookie_echoed_abort(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* There is a single T1 timer, so we should be able to use
	 * common function with the COOKIE-WAIT state.
	 */
	return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
}

/*
 * Stop T1 timer and abort association with "INIT failed".
 *
 * This is common code called by several sctp_sf_*_abort() functions above.
 */
static enum sctp_disposition sctp_stop_t1_and_abort(
					struct net *net,
					struct sctp_cmd_seq *commands,
					__be16 error, int sk_err,
					const struct sctp_association *asoc,
					struct sctp_transport *transport)
{
	pr_debug("%s: ABORT received (INIT)\n", __func__);

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
	/* CMD_INIT_FAILED will DELETE_TCB. */
	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
			SCTP_PERR(error));

	return SCTP_DISPOSITION_ABORT;
}

/*
 * sctp_sf_do_9_2_shut
 *
 * Section: 9.2
 * Upon the reception of the SHUTDOWN, the peer endpoint shall
 *  - enter the SHUTDOWN-RECEIVED state,
 *
 *  - stop accepting new data from its SCTP user
 *
 *  - verify, by checking the Cumulative TSN Ack field of the chunk,
 *    that all its outstanding DATA chunks have been received by the
 *    SHUTDOWN sender.
 *
 * Once an endpoint as reached the SHUTDOWN-RECEIVED state it MUST NOT
 * send a SHUTDOWN in response to a ULP request. And should discard
 * subsequent SHUTDOWN chunks.
 *
 * If there are still outstanding DATA chunks left, the SHUTDOWN
 * receiver shall continue to follow normal data transmission
 * procedures defined in Section 6 until all outstanding DATA chunks
 * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
 * new data from its SCTP user.
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_9_2_shutdown(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	enum sctp_disposition disposition;
	struct sctp_chunk *chunk = arg;
	struct sctp_shutdownhdr *sdh;
	struct sctp_ulpevent *ev;
	__u32 ctsn;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the SHUTDOWN chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* Convert the elaborate header. */
	sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
	skb_pull(chunk->skb, sizeof(*sdh));
	chunk->subh.shutdown_hdr = sdh;
	ctsn = ntohl(sdh->cum_tsn_ack);

	/* A cumulative TSN older than our ack point is an out-of-order
	 * SHUTDOWN; drop it.
	 */
	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
		pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
			 asoc->ctsn_ack_point);

		return SCTP_DISPOSITION_DISCARD;
	}

	/* If Cumulative TSN Ack beyond the max tsn currently
	 * send, terminating the association and respond to the
	 * sender with an ABORT.
	 */
	if (!TSN_lt(ctsn, asoc->next_tsn))
		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);

	/* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
	 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
	 * inform the application that it should cease sending data.
	 */
	ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
	if (!ev) {
		disposition = SCTP_DISPOSITION_NOMEM;
		goto out;
	}
	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));

	/* Upon the reception of the SHUTDOWN, the peer endpoint shall
	 *  - enter the SHUTDOWN-RECEIVED state,
	 *  - stop accepting new data from its SCTP user
	 *
	 * [This is implicit in the new state.]
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
	disposition = SCTP_DISPOSITION_CONSUME;

	/* If nothing is left outstanding, respond with a SHUTDOWN ACK
	 * right away.
	 */
	if (sctp_outq_is_empty(&asoc->outqueue)) {
		disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
							  arg, commands);
	}

	if (SCTP_DISPOSITION_NOMEM == disposition)
		goto out;

	/*  - verify, by checking the Cumulative TSN Ack field of the
	 *    chunk, that all its outstanding DATA chunks have been
	 *    received by the SHUTDOWN sender.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
			SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));

out:
	return disposition;
}

/*
 * sctp_sf_do_9_2_shut_ctsn
 *
 * Once an endpoint has reached the SHUTDOWN-RECEIVED state,
 * it MUST NOT send a SHUTDOWN in response to a ULP request.
 * The Cumulative TSN Ack of the received SHUTDOWN chunk
 * MUST be processed.
 */
enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_shutdownhdr *sdh;
	__u32 ctsn;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the SHUTDOWN chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
	ctsn = ntohl(sdh->cum_tsn_ack);

	/* A cumulative TSN older than our ack point is an out-of-order
	 * SHUTDOWN; drop it.
	 */
	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
		pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
			 asoc->ctsn_ack_point);

		return SCTP_DISPOSITION_DISCARD;
	}

	/* If Cumulative TSN Ack beyond the max tsn currently
	 * send, terminating the association and respond to the
	 * sender with an ABORT.
	 */
	if (!TSN_lt(ctsn, asoc->next_tsn))
		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);

	/* verify, by checking the Cumulative TSN Ack field of the
	 * chunk, that all its outstanding DATA chunks have been
	 * received by the SHUTDOWN sender.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
			SCTP_BE32(sdh->cum_tsn_ack));

	return SCTP_DISPOSITION_CONSUME;
}

/* RFC 2960 9.2
 * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
 * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
 * transport addresses (either in the IP addresses or in the INIT chunk)
 * that belong to this association, it should discard the INIT chunk and
 * retransmit the SHUTDOWN ACK chunk.
 */
static enum sctp_disposition
__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
			   const struct sctp_association *asoc,
			   const union sctp_subtype type, void *arg,
			   struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *reply;

	/* Make sure that the chunk has a valid length */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* Since we are not going to really process this INIT, there
	 * is no point in verifying chunk boundaries.  Just generate
	 * the SHUTDOWN ACK.
	 */
	reply = sctp_make_shutdown_ack(asoc, chunk);
	if (NULL == reply)
		goto nomem;

	/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
	 * the T2-SHUTDOWN timer.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));

	/* and restart the T2-shutdown timer.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	return SCTP_DISPOSITION_CONSUME;
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/* Entry point for an INIT received in SHUTDOWN-ACK-SENT state: validate
 * the packet before retransmitting the SHUTDOWN ACK via the helper above.
 */
enum sctp_disposition
sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
			 const struct sctp_association *asoc,
			 const union sctp_subtype type, void *arg,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	/* An INIT chunk must be the only chunk in its packet. */
	if (!chunk->singleton)
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Discard rather than report a violation: the packet may be bogus. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* A packet carrying an INIT must use a zero verification tag;
	 * otherwise answer it like an out-of-the-blue packet, with an ABORT.
	 */
	if (chunk->sctp_hdr->vtag != 0)
		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);

	return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
}

/*
 * sctp_sf_do_ecn_cwr
 *
 * Section:  Appendix A: Explicit Congestion Notification
 *
 * CWR:
 *
 * RFC 2481 details a specific bit for a sender to send in the header of
 * its next outbound TCP segment to indicate to its peer that it has
 * reduced its congestion window.  This is termed the CWR bit.  For
 * SCTP the same indication is made by including the CWR chunk.
 * This chunk contains one data element, i.e. the TSN number that
 * was sent in the ECNE chunk.  This element represents the lowest
 * TSN number in the datagram that was originally marked with the
 * CE bit.
 *
 * Verification Tag: 8.5 Verification Tag [Normal verification]
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
3063 */ 3064 enum sctp_disposition sctp_sf_do_ecn_cwr(struct net *net, 3065 const struct sctp_endpoint *ep, 3066 const struct sctp_association *asoc, 3067 const union sctp_subtype type, 3068 void *arg, 3069 struct sctp_cmd_seq *commands) 3070 { 3071 struct sctp_chunk *chunk = arg; 3072 struct sctp_cwrhdr *cwr; 3073 u32 lowest_tsn; 3074 3075 if (!sctp_vtag_verify(chunk, asoc)) 3076 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3077 3078 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) 3079 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3080 commands); 3081 3082 cwr = (struct sctp_cwrhdr *)chunk->skb->data; 3083 skb_pull(chunk->skb, sizeof(*cwr)); 3084 3085 lowest_tsn = ntohl(cwr->lowest_tsn); 3086 3087 /* Does this CWR ack the last sent congestion notification? */ 3088 if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) { 3089 /* Stop sending ECNE. */ 3090 sctp_add_cmd_sf(commands, 3091 SCTP_CMD_ECN_CWR, 3092 SCTP_U32(lowest_tsn)); 3093 } 3094 return SCTP_DISPOSITION_CONSUME; 3095 } 3096 3097 /* 3098 * sctp_sf_do_ecne 3099 * 3100 * Section: Appendix A: Explicit Congestion Notification 3101 * 3102 * ECN-Echo 3103 * 3104 * RFC 2481 details a specific bit for a receiver to send back in its 3105 * TCP acknowledgements to notify the sender of the Congestion 3106 * Experienced (CE) bit having arrived from the network. For SCTP this 3107 * same indication is made by including the ECNE chunk. This chunk 3108 * contains one data element, i.e. the lowest TSN associated with the IP 3109 * datagram marked with the CE bit..... 3110 * 3111 * Verification Tag: 8.5 Verification Tag [Normal verification] 3112 * Inputs 3113 * (endpoint, asoc, chunk) 3114 * 3115 * Outputs 3116 * (asoc, reply_msg, msg_up, timers, counters) 3117 * 3118 * The return value is the disposition of the chunk. 
3119 */ 3120 enum sctp_disposition sctp_sf_do_ecne(struct net *net, 3121 const struct sctp_endpoint *ep, 3122 const struct sctp_association *asoc, 3123 const union sctp_subtype type, 3124 void *arg, struct sctp_cmd_seq *commands) 3125 { 3126 struct sctp_chunk *chunk = arg; 3127 struct sctp_ecnehdr *ecne; 3128 3129 if (!sctp_vtag_verify(chunk, asoc)) 3130 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3131 3132 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) 3133 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3134 commands); 3135 3136 ecne = (struct sctp_ecnehdr *)chunk->skb->data; 3137 skb_pull(chunk->skb, sizeof(*ecne)); 3138 3139 /* If this is a newer ECNE than the last CWR packet we sent out */ 3140 sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, 3141 SCTP_U32(ntohl(ecne->lowest_tsn))); 3142 3143 return SCTP_DISPOSITION_CONSUME; 3144 } 3145 3146 /* 3147 * Section: 6.2 Acknowledgement on Reception of DATA Chunks 3148 * 3149 * The SCTP endpoint MUST always acknowledge the reception of each valid 3150 * DATA chunk. 3151 * 3152 * The guidelines on delayed acknowledgement algorithm specified in 3153 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an 3154 * acknowledgement SHOULD be generated for at least every second packet 3155 * (not every second DATA chunk) received, and SHOULD be generated within 3156 * 200 ms of the arrival of any unacknowledged DATA chunk. In some 3157 * situations it may be beneficial for an SCTP transmitter to be more 3158 * conservative than the algorithms detailed in this document allow. 3159 * However, an SCTP transmitter MUST NOT be more aggressive than the 3160 * following algorithms allow. 3161 * 3162 * A SCTP receiver MUST NOT generate more than one SACK for every 3163 * incoming packet, other than to update the offered window as the 3164 * receiving application consumes new data. 
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	union sctp_arg force = SCTP_NOFORCE();
	struct sctp_chunk *chunk = arg;
	int error;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* sctp_eat_data() does the real TSN/stream processing and tells us
	 * how to dispose of the chunk via an internal error code.
	 */
	error = sctp_eat_data(asoc, chunk, commands);
	switch (error) {
	case SCTP_IERROR_NO_ERROR:
		break;
	case SCTP_IERROR_HIGH_TSN:
	case SCTP_IERROR_BAD_STREAM:
		SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
		goto discard_noforce;
	case SCTP_IERROR_DUP_TSN:
	case SCTP_IERROR_IGNORE_TSN:
		SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
		goto discard_force;
	case SCTP_IERROR_NO_DATA:
		return SCTP_DISPOSITION_ABORT;
	case SCTP_IERROR_PROTO_VIOLATION:
		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
					       (u8 *)chunk->subh.data_hdr,
					       sctp_datahdr_len(&asoc->stream));
	default:
		BUG();
	}

	/* The sender asked for an immediate SACK (I-bit). */
	if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
		force = SCTP_FORCE();

	if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
	}

	/* If this is the last chunk in a packet, we need to count it
	 * toward sack generation.  Note that we need to SACK every
	 * OTHER packet containing data chunks, EVEN IF WE DISCARD
	 * THEM.  We elect to NOT generate SACK's if the chunk fails
	 * the verification tag test.
	 *
	 * RFC 2960  6.2 Acknowledgement on Reception of DATA Chunks
	 *
	 * The SCTP endpoint MUST always acknowledge the reception of
	 * each valid DATA chunk.
	 *
	 * The guidelines on delayed acknowledgement algorithm
	 * specified in  Section 4.2 of [RFC2581] SHOULD be followed.
	 * Specifically, an acknowledgement SHOULD be generated for at
	 * least every second packet (not every second DATA chunk)
	 * received, and SHOULD be generated within 200 ms of the
	 * arrival of any unacknowledged DATA chunk.  In some
	 * situations it may be beneficial for an SCTP transmitter to
	 * be more conservative than the algorithms detailed in this
	 * document allow. However, an SCTP transmitter MUST NOT be
	 * more aggressive than the following algorithms allow.
	 */
	if (chunk->end_of_packet)
		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);

	return SCTP_DISPOSITION_CONSUME;

discard_force:
	/* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
	 *
	 * When a packet arrives with duplicate DATA chunk(s) and with
	 * no new DATA chunk(s), the endpoint MUST immediately send a
	 * SACK with no delay.  If a packet arrives with duplicate
	 * DATA chunk(s) bundled with new DATA chunks, the endpoint
	 * MAY immediately send a SACK.  Normally receipt of duplicate
	 * DATA chunks will occur when the original SACK chunk was lost
	 * and the peer's RTO has expired.  The duplicate TSN number(s)
	 * SHOULD be reported in the SACK as duplicate.
	 */
	/* In our case, we split the MAY SACK advice up whether or not
	 * the last chunk is a duplicate.
	 */
	if (chunk->end_of_packet)
		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
	return SCTP_DISPOSITION_DISCARD;

discard_noforce:
	if (chunk->end_of_packet)
		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);

	return SCTP_DISPOSITION_DISCARD;
}

/*
 * sctp_sf_eat_data_fast_4_4
 *
 * Section: 4 (4)
 * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
 *    DATA chunks without delay.
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_eat_data_fast_4_4(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	int error;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* While shutting down, discard-type errors are all simply ignored;
	 * the forced SACK below covers acknowledgement either way.
	 */
	error = sctp_eat_data(asoc, chunk, commands);
	switch (error) {
	case SCTP_IERROR_NO_ERROR:
	case SCTP_IERROR_HIGH_TSN:
	case SCTP_IERROR_DUP_TSN:
	case SCTP_IERROR_IGNORE_TSN:
	case SCTP_IERROR_BAD_STREAM:
		break;
	case SCTP_IERROR_NO_DATA:
		return SCTP_DISPOSITION_ABORT;
	case SCTP_IERROR_PROTO_VIOLATION:
		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
					       (u8 *)chunk->subh.data_hdr,
					       sctp_datahdr_len(&asoc->stream));
	default:
		BUG();
	}

	/* Go ahead and force a SACK, since we are shutting down. */

	/* Implementor's Guide.
	 *
	 * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
	 * respond to each received packet containing one or more DATA chunk(s)
	 * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
	 */
	if (chunk->end_of_packet) {
		/* We must delay the chunk creation since the cumulative
		 * TSN has not been updated yet.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
	}

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Section: 6.2  Processing a Received SACK
 * D) Any time a SACK arrives, the endpoint performs the following:
 *
 *     i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
 *     then drop the SACK.   Since Cumulative TSN Ack is monotonically
 *     increasing, a SACK whose Cumulative TSN Ack is less than the
 *     Cumulative TSN Ack Point indicates an out-of-order SACK.
 *
 *     ii) Set rwnd equal to the newly received a_rwnd minus the number
 *     of bytes still outstanding after processing the Cumulative TSN Ack
 *     and the Gap Ack Blocks.
 *
 *     iii) If the SACK is missing a TSN that was previously
 *     acknowledged via a Gap Ack Block (e.g., the data receiver
 *     reneged on the data), then mark the corresponding DATA chunk
 *     as available for retransmit:  Mark it as missing for fast
 *     retransmit as described in Section 7.2.4 and if no retransmit
 *     timer is running for the destination address to which the DATA
 *     chunk was originally transmitted, then T3-rtx is started for
 *     that destination address.
 *
 * Verification Tag:  8.5 Verification Tag [Normal verification]
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_sackhdr *sackh;
	__u32 ctsn;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the SACK chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_sack_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* Pull the SACK chunk from the data buffer */
	sackh = sctp_sm_pull_sack(chunk);
	/* Was this a bogus SACK? */
	if (!sackh)
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	chunk->subh.sack_hdr = sackh;
	ctsn = ntohl(sackh->cum_tsn_ack);

	/* If Cumulative TSN Ack beyond the max tsn currently
	 * send, terminating the association and respond to the
	 * sender with an ABORT.
	 */
	if (TSN_lte(asoc->next_tsn, ctsn))
		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);

	trace_sctp_probe(ep, asoc, chunk);

	/* i) If Cumulative TSN Ack is less than the Cumulative TSN
	 *    Ack Point, then drop the SACK.  Since Cumulative TSN
	 *    Ack is monotonically increasing, a SACK whose
	 *    Cumulative TSN Ack is less than the Cumulative TSN Ack
	 *    Point indicates an out-of-order SACK.
	 */
	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
		pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
			 asoc->ctsn_ack_point);

		return SCTP_DISPOSITION_DISCARD;
	}

	/* Return this SACK for further processing. */
	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk));

	/* Note: We do the rest of the work on the PROCESS_SACK
	 * sideeffect.
	 */
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Generate an ABORT in response to a packet.
 *
 * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
 *
 * 8) The receiver should respond to the sender of the OOTB packet with
 *    an ABORT.  When sending the ABORT, the receiver of the OOTB packet
 *    MUST fill in the Verification Tag field of the outbound packet
 *    with the value found in the Verification Tag field of the OOTB
 *    packet and set the T-bit in the Chunk Flags to indicate that the
 *    Verification Tag is reflected.  After sending this ABORT, the
 *    receiver of the OOTB packet shall discard the OOTB packet and take
 *    no further action.
 *
 * Verification Tag:
 *
 * The return value is the disposition of the chunk.
 */
static enum sctp_disposition sctp_sf_tabort_8_4_8(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_packet *packet = NULL;
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *abort;

	packet = sctp_ootb_pkt_new(net, asoc, chunk);
	if (!packet)
		return SCTP_DISPOSITION_NOMEM;

	/* Make an ABORT. The T bit will be set if the asoc
	 * is NULL.
	 */
	abort = sctp_make_abort(asoc, chunk, 0);
	if (!abort) {
		sctp_ootb_pkt_free(packet);
		return SCTP_DISPOSITION_NOMEM;
	}

	/* Reflect vtag if T-Bit is set */
	if (sctp_test_T_bit(abort))
		packet->vtag = ntohl(chunk->sctp_hdr->vtag);

	/* Set the skb to the belonging sock for accounting. */
	abort->skb->sk = ep->base.sk;

	sctp_packet_append_chunk(packet, abort);

	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));

	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

	/* Discard the rest of the inbound packet; the ABORT is the reply. */
	sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	return SCTP_DISPOSITION_CONSUME;
}

/* Handling of SCTP Packets Containing an INIT Chunk Matching an
 * Existing Associations when the UDP encap port is incorrect.
 *
 * From Section 4 at draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.
 */
static enum sctp_disposition sctp_sf_new_encap_port(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_packet *packet = NULL;
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *abort;

	packet = sctp_ootb_pkt_new(net, asoc, chunk);
	if (!packet)
		return SCTP_DISPOSITION_NOMEM;

	/* Build an ABORT carrying the "New Encapsulation Port" error cause. */
	abort = sctp_make_new_encap_port(asoc, chunk);
	if (!abort) {
		sctp_ootb_pkt_free(packet);
		return SCTP_DISPOSITION_NOMEM;
	}

	/* Set the skb to the belonging sock for accounting. */
	abort->skb->sk = ep->base.sk;

	sctp_packet_append_chunk(packet, abort);

	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
			SCTP_PACKET(packet));

	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

	sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Received an ERROR chunk from peer.
Generate SCTP_REMOTE_ERROR 3548 * event as ULP notification for each cause included in the chunk. 3549 * 3550 * API 5.3.1.3 - SCTP_REMOTE_ERROR 3551 * 3552 * The return value is the disposition of the chunk. 3553 */ 3554 enum sctp_disposition sctp_sf_operr_notify(struct net *net, 3555 const struct sctp_endpoint *ep, 3556 const struct sctp_association *asoc, 3557 const union sctp_subtype type, 3558 void *arg, 3559 struct sctp_cmd_seq *commands) 3560 { 3561 struct sctp_chunk *chunk = arg; 3562 struct sctp_errhdr *err; 3563 3564 if (!sctp_vtag_verify(chunk, asoc)) 3565 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3566 3567 /* Make sure that the ERROR chunk has a valid length. */ 3568 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk))) 3569 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3570 commands); 3571 sctp_walk_errors(err, chunk->chunk_hdr); 3572 if ((void *)err != (void *)chunk->chunk_end) 3573 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, 3574 (void *)err, commands); 3575 3576 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3577 SCTP_CHUNK(chunk)); 3578 3579 return SCTP_DISPOSITION_CONSUME; 3580 } 3581 3582 /* 3583 * Process an inbound SHUTDOWN ACK. 3584 * 3585 * From Section 9.2: 3586 * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall 3587 * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its 3588 * peer, and remove all record of the association. 3589 * 3590 * The return value is the disposition. 
 */
enum sctp_disposition sctp_sf_do_9_2_final(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *reply;
	struct sctp_ulpevent *ev;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);
	/* 10.2 H) SHUTDOWN COMPLETE notification
	 *
	 * When SCTP completes the shutdown procedures (section 9.2) this
	 * notification is passed to the upper layer.
	 */
	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
					     0, 0, 0, NULL, GFP_ATOMIC);
	if (!ev)
		goto nomem;

	/* ...send a SHUTDOWN COMPLETE chunk to its peer, */
	reply = sctp_make_shutdown_complete(asoc, chunk);
	if (!reply)
		goto nomem_chunk;

	/* Do all the commands now (after allocation), so that we
	 * have consistent state if memory allocation fails
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));

	/* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
	 * stop the T2-shutdown timer,
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	/* ...and the T5 guard timer that bounds the whole shutdown. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));
	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	/* ...and remove all record of the association. */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
	return SCTP_DISPOSITION_DELETE_TCB;

nomem_chunk:
	sctp_ulpevent_free(ev);
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
 *
 * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
 *    respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
 *    When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
 *    packet must fill in the Verification Tag field of the outbound
 *    packet with the Verification Tag received in the SHUTDOWN ACK and
 *    set the T-bit in the Chunk Flags to indicate that the Verification
 *    Tag is reflected.
 *
 * 8) The receiver should respond to the sender of the OOTB packet with
 *    an ABORT.  When sending the ABORT, the receiver of the OOTB packet
 *    MUST fill in the Verification Tag field of the outbound packet
 *    with the value found in the Verification Tag field of the OOTB
 *    packet and set the T-bit in the Chunk Flags to indicate that the
 *    Verification Tag is reflected.  After sending this ABORT, the
 *    receiver of the OOTB packet shall discard the OOTB packet and take
 *    no further action.
3674 */ 3675 enum sctp_disposition sctp_sf_ootb(struct net *net, 3676 const struct sctp_endpoint *ep, 3677 const struct sctp_association *asoc, 3678 const union sctp_subtype type, 3679 void *arg, struct sctp_cmd_seq *commands) 3680 { 3681 struct sctp_chunk *chunk = arg; 3682 struct sk_buff *skb = chunk->skb; 3683 struct sctp_chunkhdr *ch; 3684 struct sctp_errhdr *err; 3685 int ootb_cookie_ack = 0; 3686 int ootb_shut_ack = 0; 3687 __u8 *ch_end; 3688 3689 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); 3690 3691 ch = (struct sctp_chunkhdr *)chunk->chunk_hdr; 3692 do { 3693 /* Report violation if the chunk is less then minimal */ 3694 if (ntohs(ch->length) < sizeof(*ch)) 3695 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3696 commands); 3697 3698 /* Report violation if chunk len overflows */ 3699 ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length)); 3700 if (ch_end > skb_tail_pointer(skb)) 3701 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3702 commands); 3703 3704 /* Now that we know we at least have a chunk header, 3705 * do things that are type appropriate. 3706 */ 3707 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3708 ootb_shut_ack = 1; 3709 3710 /* RFC 2960, Section 3.3.7 3711 * Moreover, under any circumstances, an endpoint that 3712 * receives an ABORT MUST NOT respond to that ABORT by 3713 * sending an ABORT of its own. 3714 */ 3715 if (SCTP_CID_ABORT == ch->type) 3716 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3717 3718 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR 3719 * or a COOKIE ACK the SCTP Packet should be silently 3720 * discarded. 
3721 */ 3722 3723 if (SCTP_CID_COOKIE_ACK == ch->type) 3724 ootb_cookie_ack = 1; 3725 3726 if (SCTP_CID_ERROR == ch->type) { 3727 sctp_walk_errors(err, ch) { 3728 if (SCTP_ERROR_STALE_COOKIE == err->cause) { 3729 ootb_cookie_ack = 1; 3730 break; 3731 } 3732 } 3733 } 3734 3735 ch = (struct sctp_chunkhdr *)ch_end; 3736 } while (ch_end < skb_tail_pointer(skb)); 3737 3738 if (ootb_shut_ack) 3739 return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands); 3740 else if (ootb_cookie_ack) 3741 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 3742 else 3743 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); 3744 } 3745 3746 /* 3747 * Handle an "Out of the blue" SHUTDOWN ACK. 3748 * 3749 * Section: 8.4 5, sctpimpguide 2.41. 3750 * 3751 * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should 3752 * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. 3753 * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB 3754 * packet must fill in the Verification Tag field of the outbound 3755 * packet with the Verification Tag received in the SHUTDOWN ACK and 3756 * set the T-bit in the Chunk Flags to indicate that the Verification 3757 * Tag is reflected. 3758 * 3759 * Inputs 3760 * (endpoint, asoc, type, arg, commands) 3761 * 3762 * Outputs 3763 * (enum sctp_disposition) 3764 * 3765 * The return value is the disposition of the chunk. 3766 */ 3767 static enum sctp_disposition sctp_sf_shut_8_4_5( 3768 struct net *net, 3769 const struct sctp_endpoint *ep, 3770 const struct sctp_association *asoc, 3771 const union sctp_subtype type, 3772 void *arg, 3773 struct sctp_cmd_seq *commands) 3774 { 3775 struct sctp_packet *packet = NULL; 3776 struct sctp_chunk *chunk = arg; 3777 struct sctp_chunk *shut; 3778 3779 packet = sctp_ootb_pkt_new(net, asoc, chunk); 3780 if (!packet) 3781 return SCTP_DISPOSITION_NOMEM; 3782 3783 /* Make an SHUTDOWN_COMPLETE. 3784 * The T bit will be set if the asoc is NULL. 
	 */
	shut = sctp_make_shutdown_complete(asoc, chunk);
	if (!shut) {
		sctp_ootb_pkt_free(packet);
		return SCTP_DISPOSITION_NOMEM;
	}

	/* Reflect vtag if T-Bit is set */
	if (sctp_test_T_bit(shut))
		packet->vtag = ntohl(chunk->sctp_hdr->vtag);

	/* Set the skb to the belonging sock for accounting. */
	shut->skb->sk = ep->base.sk;

	sctp_packet_append_chunk(packet, shut);

	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
			SCTP_PACKET(packet));

	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

	/* We need to discard the rest of the packet to prevent
	 * potential boomming attacks from additional bundled chunks.
	 * This is documented in SCTP Threats ID.
	 */
	return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}

/*
 * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
 *
 * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
 *   If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
 *   procedures in section 8.4 SHOULD be followed, in other words it
 *   should be treated as an Out Of The Blue packet.
 *   [This means that we do NOT check the Verification Tag on these
 *   chunks. --piggy ]
 *
 */
enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
					    const struct sctp_endpoint *ep,
					    const struct sctp_association *asoc,
					    const union sctp_subtype type,
					    void *arg,
					    struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	/* On a vtag mismatch, fall back to OOTB-style handling below. */
	if (!sctp_vtag_verify(chunk, asoc))
		asoc = NULL;

	/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* Although we do have an association in this case, it corresponds
	 * to a restarted association. So the packet is treated as an OOTB
	 * packet and the state function that handles OOTB SHUTDOWN_ACK is
	 * called with a NULL association.
	 */
	SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);

	return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
}

/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
enum sctp_disposition sctp_sf_do_asconf(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_paramhdr *err_param = NULL;
	struct sctp_chunk *asconf_ack = NULL;
	struct sctp_chunk *chunk = arg;
	struct sctp_addiphdr *hdr;
	__u32 serial;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	/* Make sure that the ASCONF ADDIP chunk has a valid length.  */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* ADD-IP: Section 4.1.1
	 * This chunk MUST be sent in an  authenticated way by using
	 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
	 * is received unauthenticated it MUST be silently discarded as
	 * described in [I-D.ietf-tsvwg-sctp-auth].
	 */
	if (!asoc->peer.asconf_capable ||
	    (!net->sctp.addip_noauth && !chunk->auth))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	hdr = (struct sctp_addiphdr *)chunk->skb->data;
	serial = ntohl(hdr->serial);

	/* Verify the ASCONF chunk before processing it.
	 */
	if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
						  (void *)err_param, commands);

	/* ADDIP 5.2 E1) Compare the value of the serial number to the value
	 * the endpoint stored in a new association variable
	 * 'Peer-Serial-Number'.
	 */
	if (serial == asoc->peer.addip_serial + 1) {
		/* If this is the first instance of ASCONF in the packet,
		 * we can clean our old ASCONF-ACKs.
		 */
		if (!chunk->has_asconf)
			sctp_assoc_clean_asconf_ack_cache(asoc);

		/* ADDIP 5.2 E4) When the Sequence Number matches the next one
		 * expected, process the ASCONF as described below and after
		 * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
		 * the response packet and cache a copy of it (in the event it
		 * later needs to be retransmitted).
		 *
		 * Essentially, do V1-V5.
		 */
		asconf_ack = sctp_process_asconf((struct sctp_association *)
						 asoc, chunk);
		if (!asconf_ack)
			return SCTP_DISPOSITION_NOMEM;
	} else if (serial < asoc->peer.addip_serial + 1) {
		/* ADDIP 5.2 E2)
		 * If the value found in the Sequence Number is less than the
		 * ('Peer- Sequence-Number' + 1), simply skip to the next
		 * ASCONF, and include in the outbound response packet
		 * any previously cached ASCONF-ACK response that was
		 * sent and saved that matches the Sequence Number of the
		 * ASCONF.  Note: It is possible that no cached ASCONF-ACK
		 * Chunk exists.  This will occur when an older ASCONF
		 * arrives out of order.  In such a case, the receiver
		 * should skip the ASCONF Chunk and not include ASCONF-ACK
		 * Chunk for that chunk.
		 */
		asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
		if (!asconf_ack)
			return SCTP_DISPOSITION_DISCARD;

		/* Reset the transport so that we select the correct one
		 * this time around. This is to make sure that we don't
		 * accidentally use a stale transport that's been removed.
		 */
		asconf_ack->transport = NULL;
	} else {
		/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
		 * it must be either a stale packet or from an attacker.
		 */
		return SCTP_DISPOSITION_DISCARD;
	}

	/* ADDIP 5.2 E6)  The destination address of the SCTP packet
	 * containing the ASCONF-ACK Chunks MUST be the source address of
	 * the SCTP packet that held the ASCONF Chunks.
	 *
	 * To do this properly, we'll set the destination address of the chunk
	 * and at the transmit time, will try look up the transport to use.
	 * Since ASCONFs may be bundled, the correct transport may not be
	 * created until we process the entire packet, thus this workaround.
	 */
	asconf_ack->dest = chunk->source;
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
	if (asoc->new_transport) {
		/* Probe the freshly-added transport with a HEARTBEAT. */
		sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
		((struct sctp_association *)asoc)->new_transport = NULL;
	}

	return SCTP_DISPOSITION_CONSUME;
}

/* Dequeue the next queued ASCONF chunk (if any) and send it as a
 * primitive; called once the previous ASCONF has been acked.
 */
static enum sctp_disposition sctp_send_next_asconf(
					struct net *net,
					const struct sctp_endpoint *ep,
					struct sctp_association *asoc,
					const union sctp_subtype type,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *asconf;
	struct list_head *entry;

	if (list_empty(&asoc->addip_chunk_list))
		return SCTP_DISPOSITION_CONSUME;

	entry = asoc->addip_chunk_list.next;
	asconf = list_entry(entry, struct sctp_chunk, list);

	list_del_init(entry);
	sctp_chunk_hold(asconf);
	asoc->addip_last_asconf = asconf;

	return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
}

/*
 * ADDIP Section 4.3 General rules for address manipulation
 * When building TLV parameters for the ASCONF Chunk that will add or
 * delete IP addresses the D0 to D13 rules should be applied:
 */
enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
					    const struct sctp_endpoint *ep,
					    const struct sctp_association *asoc,
					    const union sctp_subtype type,
					    void *arg,
					    struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
	struct sctp_paramhdr *err_param = NULL;
	struct sctp_chunk *asconf_ack = arg;
	struct sctp_addiphdr *addip_hdr;
	__u32 sent_serial, rcvd_serial;
	struct sctp_chunk *abort;

	if (!sctp_vtag_verify(asconf_ack, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	/* Make sure that the ADDIP chunk has a valid length.  */
	if (!sctp_chunk_length_valid(asconf_ack,
				     sizeof(struct sctp_addip_chunk)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* ADD-IP, Section 4.1.2:
	 * This chunk MUST be sent in an  authenticated way by using
	 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
	 * is received unauthenticated it MUST be silently discarded as
	 * described in [I-D.ietf-tsvwg-sctp-auth].
	 */
	if (!asoc->peer.asconf_capable ||
	    (!net->sctp.addip_noauth && !asconf_ack->auth))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
	rcvd_serial = ntohl(addip_hdr->serial);

	/* Verify the ASCONF-ACK chunk before processing it. */
	if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
						  (void *)err_param, commands);

	/* Derive the serial of the last ASCONF we sent; if none is
	 * outstanding, use addip_serial - 1 so the D0 check below fires
	 * for any new serial.
	 */
	if (last_asconf) {
		addip_hdr = (struct sctp_addiphdr *)last_asconf->subh.addip_hdr;
		sent_serial = ntohl(addip_hdr->serial);
	} else {
		sent_serial = asoc->addip_serial - 1;
	}

	/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
	 * equal to the next serial number to be used but no ASCONF chunk is
	 * outstanding the endpoint MUST ABORT the association. Note that a
	 * sequence number is greater than if it is no more than 2^^31-1
	 * larger than the current sequence number (using serial arithmetic).
	 */
	if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
	    !(asoc->addip_last_asconf)) {
		abort = sctp_make_abort(asoc, asconf_ack,
					sizeof(struct sctp_errhdr));
		if (abort) {
			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
		}
		/* We are going to ABORT, so we might as well stop
		 * processing the rest of the chunks in the packet.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ECONNABORTED));
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_ABORT;
	}

	if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));

		/* On success, move on to the next queued ASCONF; on
		 * failure, abort the association (resource shortage).
		 */
		if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
					     asconf_ack))
			return sctp_send_next_asconf(net, ep,
					(struct sctp_association *)asoc,
							type, commands);

		abort = sctp_make_abort(asoc, asconf_ack,
					sizeof(struct sctp_errhdr));
		if (abort) {
			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
		}
		/* We are going to ABORT, so we might as well stop
		 * processing the rest of the chunks in the packet.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ECONNABORTED));
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_ABORT;
	}

	return SCTP_DISPOSITION_DISCARD;
}

/* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk.
 */
enum sctp_disposition sctp_sf_do_reconf(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_paramhdr *err_param = NULL;
	struct sctp_chunk *chunk = arg;
	struct sctp_reconf_chunk *hdr;
	union sctp_params param;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	/* Make sure that the RECONF chunk has a valid length.  */
	if (!sctp_chunk_length_valid(chunk, sizeof(*hdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	if (!sctp_verify_reconf(asoc, chunk, &err_param))
		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
						  (void *)err_param, commands);

	/* Dispatch each RECONF parameter to its stream-reset handler;
	 * each handler may produce a reply chunk and/or a ULP event.
	 */
	hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
	sctp_walk_params(param, hdr, params) {
		struct sctp_chunk *reply = NULL;
		struct sctp_ulpevent *ev = NULL;

		if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST)
			reply = sctp_process_strreset_outreq(
				(struct sctp_association *)asoc, param, &ev);
		else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
			reply = sctp_process_strreset_inreq(
				(struct sctp_association *)asoc, param, &ev);
		else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
			reply = sctp_process_strreset_tsnreq(
				(struct sctp_association *)asoc, param, &ev);
		else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
			reply = sctp_process_strreset_addstrm_out(
				(struct sctp_association *)asoc, param, &ev);
		else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
			reply = sctp_process_strreset_addstrm_in(
				(struct sctp_association *)asoc, param, &ev);
		else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
			reply = sctp_process_strreset_resp(
				(struct sctp_association *)asoc, param, &ev);

		if (ev)
			sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
					SCTP_ULPEVENT(ev));

		if (reply)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(reply));
	}

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
 *
 * When a FORWARD TSN chunk arrives, the data receiver MUST first update
 * its cumulative TSN point to the value carried in the FORWARD TSN
 * chunk, and then MUST further advance its cumulative TSN point locally
 * if possible.
 * After the above processing, the data receiver MUST stop reporting any
 * missing TSNs earlier than or equal to the new cumulative TSN point.
 *
 * Verification Tag:  8.5 Verification Tag  [Normal verification]
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
					  const struct sctp_endpoint *ep,
					  const struct sctp_association *asoc,
					  const union sctp_subtype type,
					  void *arg,
					  struct sctp_cmd_seq *commands)
{
	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
	struct sctp_chunk *chunk = arg;
	__u16 len;
	__u32 tsn;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	/* FORWARD TSN is only meaningful if the peer negotiated PR-SCTP. */
	if (!asoc->peer.prsctp_capable)
		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);

	/* Make sure that the FORWARD_TSN chunk has valid length.
	 */
	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
	chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
	len = ntohs(chunk->chunk_hdr->length);
	len -= sizeof(struct sctp_chunkhdr);
	skb_pull(chunk->skb, len);

	tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
	pr_debug("%s: TSN 0x%x\n", __func__, tsn);

	/* The TSN is too high--silently discard the chunk and count on it
	 * getting retransmitted later.
	 */
	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
		goto discard_noforce;

	if (!asoc->stream.si->validate_ftsn(chunk))
		goto discard_noforce;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
	/* Only process per-stream skip entries if any are present past
	 * the fixed FORWARD TSN header.
	 */
	if (len > sctp_ftsnhdr_len(&asoc->stream))
		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
				SCTP_CHUNK(chunk));

	/* Count this as receiving DATA. */
	if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
	}

	/* FIXME: For now send a SACK, but DATA processing may
	 * send another.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());

	return SCTP_DISPOSITION_CONSUME;

discard_noforce:
	return SCTP_DISPOSITION_DISCARD;
}

/* As sctp_sf_eat_fwd_tsn(), but for the SHUTDOWN-SENT/shutdown phase:
 * always ends by forcing a SACK plus a SHUTDOWN (see gen_shutdown).
 */
enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
	struct sctp_chunk *chunk = arg;
	__u16 len;
	__u32 tsn;

	if (!sctp_vtag_verify(chunk, asoc)) {
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
				SCTP_NULL());
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	}

	if (!asoc->peer.prsctp_capable)
		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);

	/* Make sure that the FORWARD_TSN chunk has a valid length.  */
	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
	chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
	len = ntohs(chunk->chunk_hdr->length);
	len -= sizeof(struct sctp_chunkhdr);
	skb_pull(chunk->skb, len);

	tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
	pr_debug("%s: TSN 0x%x\n", __func__, tsn);

	/* The TSN is too high--silently discard the chunk and count on it
	 * getting retransmitted later.
	 */
	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
		goto gen_shutdown;

	if (!asoc->stream.si->validate_ftsn(chunk))
		goto gen_shutdown;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
	if (len > sctp_ftsnhdr_len(&asoc->stream))
		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
				SCTP_CHUNK(chunk));

	/* Go a head and force a SACK, since we are shutting down.
	 */
gen_shutdown:
	/* Implementor's Guide.
	 *
	 * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
	 * respond to each received packet containing one or more DATA chunk(s)
	 * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * SCTP-AUTH Section 6.3 Receiving authenticated chunks
 *
 *    The receiver MUST use the HMAC algorithm indicated in the HMAC
 *    Identifier field.  If this algorithm was not specified by the
 *    receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
 *    during association setup, the AUTH chunk and all chunks after it MUST
 *    be discarded and an ERROR chunk SHOULD be sent with the error cause
 *    defined in Section 4.1.
 *
 *    If an endpoint with no shared key receives a Shared Key Identifier
 *    other than 0, it MUST silently discard all authenticated chunks.  If
 *    the endpoint has at least one endpoint pair shared key for the peer,
 *    it MUST use the key specified by the Shared Key Identifier if a
 *    key has been configured for that Shared Key Identifier.  If no
 *    endpoint pair shared key has been configured for that Shared Key
 *    Identifier, all authenticated chunks MUST be silently discarded.
 *
 * Verification Tag:  8.5 Verification Tag  [Normal verification]
 *
 * The return value is the disposition of the chunk.
 */
static enum sctp_ierror sctp_sf_authenticate(
					const struct sctp_association *asoc,
					struct sctp_chunk *chunk)
{
	struct sctp_shared_key *sh_key = NULL;
	struct sctp_authhdr *auth_hdr;
	__u8 *save_digest, *digest;
	struct sctp_hmac *hmac;
	unsigned int sig_len;
	__u16 key_id;

	/* Pull in the auth header, so we can do some more verification */
	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
	chunk->subh.auth_hdr = auth_hdr;
	skb_pull(chunk->skb, sizeof(*auth_hdr));

	/* Make sure that we support the HMAC algorithm from the auth
	 * chunk.
	 */
	if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
		return SCTP_IERROR_AUTH_BAD_HMAC;

	/* Make sure that the provided shared key identifier has been
	 * configured
	 */
	key_id = ntohs(auth_hdr->shkey_id);
	if (key_id != asoc->active_key_id) {
		sh_key = sctp_auth_get_shkey(asoc, key_id);
		if (!sh_key)
			return SCTP_IERROR_AUTH_BAD_KEYID;
	}

	/* Make sure that the length of the signature matches what
	 * we expect.
	 */
	sig_len = ntohs(chunk->chunk_hdr->length) -
		  sizeof(struct sctp_auth_chunk);
	hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
	if (sig_len != hmac->hmac_len)
		return SCTP_IERROR_PROTO_VIOLATION;

	/* Now that we've done validation checks, we can compute and
	 * verify the hmac.  The steps involved are:
	 *  1. Save the digest from the chunk.
	 *  2. Zero out the digest in the chunk.
	 *  3. Compute the new digest
	 *  4. Compare saved and new digests.
4386 */ 4387 digest = auth_hdr->hmac; 4388 skb_pull(chunk->skb, sig_len); 4389 4390 save_digest = kmemdup(digest, sig_len, GFP_ATOMIC); 4391 if (!save_digest) 4392 goto nomem; 4393 4394 memset(digest, 0, sig_len); 4395 4396 sctp_auth_calculate_hmac(asoc, chunk->skb, 4397 (struct sctp_auth_chunk *)chunk->chunk_hdr, 4398 sh_key, GFP_ATOMIC); 4399 4400 /* Discard the packet if the digests do not match */ 4401 if (memcmp(save_digest, digest, sig_len)) { 4402 kfree(save_digest); 4403 return SCTP_IERROR_BAD_SIG; 4404 } 4405 4406 kfree(save_digest); 4407 chunk->auth = 1; 4408 4409 return SCTP_IERROR_NO_ERROR; 4410 nomem: 4411 return SCTP_IERROR_NOMEM; 4412 } 4413 4414 enum sctp_disposition sctp_sf_eat_auth(struct net *net, 4415 const struct sctp_endpoint *ep, 4416 const struct sctp_association *asoc, 4417 const union sctp_subtype type, 4418 void *arg, struct sctp_cmd_seq *commands) 4419 { 4420 struct sctp_chunk *chunk = arg; 4421 struct sctp_authhdr *auth_hdr; 4422 struct sctp_chunk *err_chunk; 4423 enum sctp_ierror error; 4424 4425 /* Make sure that the peer has AUTH capable */ 4426 if (!asoc->peer.auth_capable) 4427 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); 4428 4429 if (!sctp_vtag_verify(chunk, asoc)) { 4430 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 4431 SCTP_NULL()); 4432 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4433 } 4434 4435 /* Make sure that the AUTH chunk has valid length. 
*/ 4436 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) 4437 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 4438 commands); 4439 4440 auth_hdr = (struct sctp_authhdr *)chunk->skb->data; 4441 error = sctp_sf_authenticate(asoc, chunk); 4442 switch (error) { 4443 case SCTP_IERROR_AUTH_BAD_HMAC: 4444 /* Generate the ERROR chunk and discard the rest 4445 * of the packet 4446 */ 4447 err_chunk = sctp_make_op_error(asoc, chunk, 4448 SCTP_ERROR_UNSUP_HMAC, 4449 &auth_hdr->hmac_id, 4450 sizeof(__u16), 0); 4451 if (err_chunk) { 4452 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 4453 SCTP_CHUNK(err_chunk)); 4454 } 4455 fallthrough; 4456 case SCTP_IERROR_AUTH_BAD_KEYID: 4457 case SCTP_IERROR_BAD_SIG: 4458 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 4459 4460 case SCTP_IERROR_PROTO_VIOLATION: 4461 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 4462 commands); 4463 4464 case SCTP_IERROR_NOMEM: 4465 return SCTP_DISPOSITION_NOMEM; 4466 4467 default: /* Prevent gcc warnings */ 4468 break; 4469 } 4470 4471 if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { 4472 struct sctp_ulpevent *ev; 4473 4474 ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id), 4475 SCTP_AUTH_NEW_KEY, GFP_ATOMIC); 4476 4477 if (!ev) 4478 return -ENOMEM; 4479 4480 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 4481 SCTP_ULPEVENT(ev)); 4482 } 4483 4484 return SCTP_DISPOSITION_CONSUME; 4485 } 4486 4487 /* 4488 * Process an unknown chunk. 4489 * 4490 * Section: 3.2. Also, 2.1 in the implementor's guide. 4491 * 4492 * Chunk Types are encoded such that the highest-order two bits specify 4493 * the action that must be taken if the processing endpoint does not 4494 * recognize the Chunk Type. 4495 * 4496 * 00 - Stop processing this SCTP packet and discard it, do not process 4497 * any further chunks within it. 
 *
 * 01 - Stop processing this SCTP packet and discard it, do not process
 *      any further chunks within it, and report the unrecognized
 *      chunk in an 'Unrecognized Chunk Type'.
 *
 * 10 - Skip this chunk and continue processing.
 *
 * 11 - Skip this chunk and continue processing, but report in an ERROR
 *      Chunk using the 'Unrecognized Chunk Type' cause of error.
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_unk_chunk(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *unk_chunk = arg;
	struct sctp_chunk *err_chunk;
	struct sctp_chunkhdr *hdr;

	pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk);

	if (!sctp_vtag_verify(unk_chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the chunk has a valid length.
	 * Since we don't know the chunk type, we use a general
	 * chunkhdr structure to make a comparison.
	 */
	if (!sctp_chunk_length_valid(unk_chunk, sizeof(*hdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	/* The top two bits of the chunk type encode the required action. */
	switch (type.chunk & SCTP_CID_ACTION_MASK) {
	case SCTP_CID_ACTION_DISCARD:
		/* Discard the packet.  */
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
	case SCTP_CID_ACTION_DISCARD_ERR:
		/* Generate an  ERROR chunk as response. */
		hdr = unk_chunk->chunk_hdr;
		err_chunk = sctp_make_op_error(asoc, unk_chunk,
					       SCTP_ERROR_UNKNOWN_CHUNK, hdr,
					       SCTP_PAD4(ntohs(hdr->length)),
					       0);
		if (err_chunk) {
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(err_chunk));
		}

		/* Discard the packet.
		 */
		sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
		/* CONSUME (not pdiscard's return) so the queued ERROR
		 * reply is still sent.
		 */
		return SCTP_DISPOSITION_CONSUME;
	case SCTP_CID_ACTION_SKIP:
		/* Skip the chunk.  */
		return SCTP_DISPOSITION_DISCARD;
	case SCTP_CID_ACTION_SKIP_ERR:
		/* Generate an  ERROR chunk as response. */
		hdr = unk_chunk->chunk_hdr;
		err_chunk = sctp_make_op_error(asoc, unk_chunk,
					       SCTP_ERROR_UNKNOWN_CHUNK, hdr,
					       SCTP_PAD4(ntohs(hdr->length)),
					       0);
		if (err_chunk) {
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(err_chunk));
		}
		/* Skip the chunk. */
		return SCTP_DISPOSITION_CONSUME;
	default:
		break;
	}

	return SCTP_DISPOSITION_DISCARD;
}

/*
 * Discard the chunk.
 *
 * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
 * [Too numerous to mention...]
 * Verification Tag: No verification needed.
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
					    const struct sctp_endpoint *ep,
					    const struct sctp_association *asoc,
					    const union sctp_subtype type,
					    void *arg,
					    struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	if (asoc && !sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the chunk has a valid length.
	 * Since we don't know the chunk type, we use a general
	 * chunkhdr structure to make a comparison.
	 */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk);

	return SCTP_DISPOSITION_DISCARD;
}

/*
 * Discard the whole packet.
 *
 * Section: 8.4 2)
 *
 * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
 *    silently discard the OOTB packet and take no further action.
 *
 * Verification Tag: No verification necessary
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_pdiscard(struct net *net,
				       const struct sctp_endpoint *ep,
				       const struct sctp_association *asoc,
				       const union sctp_subtype type,
				       void *arg, struct sctp_cmd_seq *commands)
{
	SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());

	return SCTP_DISPOSITION_CONSUME;
}


/*
 * The other end is violating protocol.
 *
 * Section: Not specified
 * Verification Tag: Not specified
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (asoc, reply_msg, msg_up, timers, counters)
 *
 * We simply tag the chunk as a violation.  The state machine will log
 * the violation and continue.
 */
enum sctp_disposition sctp_sf_violation(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	if (!sctp_vtag_verify(chunk, asoc))
		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);

	/* Make sure that the chunk has a valid length. */
	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);

	return SCTP_DISPOSITION_VIOLATION;
}

/*
 * Common function to handle a protocol violation.
 * Builds an ABORT with a Protocol Violation error cause carrying
 * 'payload'/'paylen' as diagnostic text, queues it (on the association
 * if one exists, otherwise as an OOTB packet), tears the association
 * down, and discards the offending packet.
 */
static enum sctp_disposition sctp_sf_abort_violation(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					void *arg,
					struct sctp_cmd_seq *commands,
					const __u8 *payload,
					const size_t paylen)
{
	struct sctp_packet *packet = NULL;
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *abort = NULL;

	/* SCTP-AUTH, Section 6.3:
	 *    It should be noted that if the receiver wants to tear
	 *    down an association in an authenticated way only, the
	 *    handling of malformed packets should not result in
	 *    tearing down the association.
	 *
	 * This means that if we only want to abort associations
	 * in an authenticated way (i.e AUTH+ABORT), then we
	 * can't destroy this association just because the packet
	 * was malformed.
	 */
	if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
		goto discard;

	/* Make the abort chunk. */
	abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
	if (!abort)
		goto nomem;

	if (asoc) {
		/* Treat INIT-ACK as a special case during COOKIE-WAIT:
		 * if the peer's init tag has not been recorded yet, pick
		 * it out of the INIT-ACK itself so the ABORT carries a
		 * vtag the peer will accept; a truncated INIT-ACK gets
		 * the T bit instead.
		 */
		if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
		    !asoc->peer.i.init_tag) {
			struct sctp_initack_chunk *initack;

			initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
			if (!sctp_chunk_length_valid(chunk, sizeof(*initack)))
				abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
			else {
				unsigned int inittag;

				inittag = ntohl(initack->init_hdr.init_tag);
				sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
						SCTP_U32(inittag));
			}
		}

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

		/* An association still being established fails with
		 * ECONNREFUSED; an established one with ECONNABORTED.
		 */
		if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
					SCTP_ERROR(ECONNREFUSED));
			sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
					SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
		} else {
			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
					SCTP_ERROR(ECONNABORTED));
			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
					SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
			SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		}
	} else {
		/* No association: send the ABORT as an out-of-the-blue
		 * reply packet.
		 */
		packet = sctp_ootb_pkt_new(net, asoc, chunk);

		if (!packet)
			goto nomem_pkt;

		if (sctp_test_T_bit(abort))
			packet->vtag = ntohl(chunk->sctp_hdr->vtag);

		abort->skb->sk = ep->base.sk;

		sctp_packet_append_chunk(packet, abort);

		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
			SCTP_PACKET(packet));

		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);

discard:
	/* The offending packet is always discarded, even when no ABORT
	 * could be sent (AUTH-only teardown case).
	 */
	sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
	return SCTP_DISPOSITION_ABORT;

nomem_pkt:
	sctp_chunk_free(abort);
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Handle a protocol violation when the chunk length is invalid.
 * "Invalid" length is identified as smaller than the minimal length a
 * given chunk can be.  For example, a SACK chunk has invalid length
 * if its length is set to be smaller than the size of struct sctp_sack_chunk.
 *
 * We inform the other end by sending an ABORT with a Protocol Violation
 * error code.
 *
 * Section: Not specified
 * Verification Tag: Nothing to do
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * Outputs
 * (reply_msg, msg_up, counters)
 *
 * Generate an ABORT chunk and terminate the association.
 */
static enum sctp_disposition sctp_sf_violation_chunklen(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	/* sizeof() (not strlen()) is intentional: the NUL terminator is
	 * included in the error cause's diagnostic payload.
	 */
	static const char err_str[] = "The following chunk had invalid length:";

	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
				       sizeof(err_str));
}

/*
 * Handle a protocol violation when the parameter length is invalid.
 * If the length is smaller than the minimum length of a given parameter,
 * or accumulated length in multi parameters exceeds the end of the chunk,
 * the length is considered as invalid.
 */
static enum sctp_disposition sctp_sf_violation_paramlen(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg, void *ext,
					struct sctp_cmd_seq *commands)
{
	struct sctp_paramhdr *param = ext;
	struct sctp_chunk *abort = NULL;
	struct sctp_chunk *chunk = arg;

	/* SCTP-AUTH 6.3: do not tear down the association on a malformed
	 * packet when ABORT chunks must be authenticated.
	 */
	if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
		goto discard;

	/* Make the abort chunk, naming the offending parameter. */
	abort = sctp_make_violation_paramlen(asoc, chunk, param);
	if (!abort)
		goto nomem;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);

	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
			SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);

discard:
	sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
	return SCTP_DISPOSITION_ABORT;
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/* Handle a protocol violation when the peer trying to advance the
 * cumulative tsn ack to a point beyond the max tsn currently sent.
 *
 * We inform the other end by sending an ABORT with a Protocol Violation
 * error code.
 */
static enum sctp_disposition sctp_sf_violation_ctsn(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";

	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
				       sizeof(err_str));
}

/* Handle protocol violation of an invalid chunk bundling.  For example,
 * when we have an association and we receive bundled INIT-ACK, or
 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
 * statement from the specs.  Additionally, there might be an attacker
 * on the path and we may not want to continue this communication.
 */
static enum sctp_disposition sctp_sf_violation_chunk(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	static const char err_str[] = "The following chunk violates protocol:";

	/* With no association there is nothing to abort; report the
	 * violation on the chunk itself instead.
	 */
	if (!asoc)
		return sctp_sf_violation(net, ep, asoc, type, arg, commands);

	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
				       sizeof(err_str));
}
/***************************************************************************
 * These are the state functions for handling primitive (Section 10) events.
 ***************************************************************************/
/*
 * sctp_sf_do_prm_asoc
 *
 * Section: 10.1 ULP-to-SCTP
 * B) Associate
 *
 * Format: ASSOCIATE(local SCTP instance name, destination transport addr,
 * outbound stream count)
 * -> association id [,destination transport addr list] [,outbound stream
 * count]
 *
 * This primitive allows the upper layer to initiate an association to a
 * specific peer endpoint.
 *
 * The peer endpoint shall be specified by one of the transport addresses
 * which defines the endpoint (see Section 1.4).  If the local SCTP
 * instance has not been initialized, the ASSOCIATE is considered an
 * error.
 * [This is not relevant for the kernel implementation since we do all
 * initialization at boot time.  If we hadn't initialized we wouldn't
 * get anywhere near this code.]
 *
 * An association id, which is a local handle to the SCTP association,
 * will be returned on successful establishment of the association. If
 * SCTP is not able to open an SCTP association with the peer endpoint,
 * an error is returned.
 * [In the kernel implementation, the struct sctp_association needs to
 * be created BEFORE causing this primitive to run.]
 *
 * Other association parameters may be returned, including the
 * complete destination transport addresses of the peer as well as the
 * outbound stream count of the local endpoint. One of the transport
 * address from the returned destination addresses will be selected by
 * the local endpoint as default primary path for sending SCTP packets
 * to this peer.  The returned "destination transport addr list" can
 * be used by the ULP to change the default primary path or to force
 * sending a packet to a specific transport address.  [All of this
 * stuff happens when the INIT ACK arrives.  This is a NON-BLOCKING
 * function.]
 *
 * Mandatory attributes:
 *
 * o local SCTP instance name - obtained from the INITIALIZE operation.
 *   [This is the argument asoc.]
 * o destination transport addr - specified as one of the transport
 *   addresses of the peer endpoint with which the association is to be
 *   established.
 *  [This is asoc->peer.active_path.]
 * o outbound stream count - the number of outbound streams the ULP
 *   would like to open towards this peer endpoint.
 *   [BUG: This is not currently implemented.]
 * Optional attributes:
 *
 * None.
 *
 * The return value is a disposition.
 */
enum sctp_disposition sctp_sf_do_prm_asoc(struct net *net,
					  const struct sctp_endpoint *ep,
					  const struct sctp_association *asoc,
					  const union sctp_subtype type,
					  void *arg,
					  struct sctp_cmd_seq *commands)
{
	struct sctp_association *my_asoc;
	struct sctp_chunk *repl;

	/* The comment below says that we enter COOKIE-WAIT AFTER
	 * sending the INIT, but that doesn't actually work in our
	 * implementation...
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_COOKIE_WAIT));

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * A) "A" first sends an INIT chunk to "Z".  In the INIT, "A"
	 * must provide its Verification Tag (Tag_A) in the Initiate
	 * Tag field.  Tag_A SHOULD be a random number in the range of
	 * 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
	 */

	repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
	if (!repl)
		goto nomem;

	/* Choose transport for INIT. */
	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
			SCTP_CHUNK(repl));

	/* Cast away the const modifier, as we want to just
	 * rerun it through as a side effect.
	 */
	my_asoc = (struct sctp_association *)asoc;
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));

	/* After sending the INIT, "A" starts the T1-init timer and
	 * enters the COOKIE-WAIT state.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Process the SEND primitive.
 *
 * Section: 10.1 ULP-to-SCTP
 * E) Send
 *
 * Format: SEND(association id, buffer address, byte count [,context]
 * [,stream id] [,life time] [,destination transport address]
 * [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
 * -> result
 *
 * This is the main method to send user data via SCTP.
 *
 * Mandatory attributes:
 *
 * o association id - local handle to the SCTP association
 *
 * o buffer address - the location where the user message to be
 *   transmitted is stored;
 *
 * o byte count - The size of the user data in number of bytes;
 *
 * Optional attributes:
 *
 * o context - an optional 32 bit integer that will be carried in the
 *   sending failure notification to the ULP if the transportation of
 *   this User Message fails.
 *
 * o stream id - to indicate which stream to send the data on. If not
 *   specified, stream 0 will be used.
 *
 * o life time - specifies the life time of the user data. The user data
 *   will not be sent by SCTP after the life time expires. This
 *   parameter can be used to avoid efforts to transmit stale
 *   user messages. SCTP notifies the ULP if the data cannot be
 *   initiated to transport (i.e. sent to the destination via SCTP's
 *   send primitive) within the life time variable. However, the
 *   user data will be transmitted if SCTP has attempted to transmit a
 *   chunk before the life time expired.
 *
 * o destination transport address - specified as one of the destination
 *   transport addresses of the peer endpoint to which this packet
 *   should be sent. Whenever possible, SCTP should use this destination
 *   transport address for sending the packets, instead of the current
 *   primary path.
 *
 * o unorder flag - this flag, if present, indicates that the user
 *   would like the data delivered in an unordered fashion to the peer
 *   (i.e., the U flag is set to 1 on all DATA chunks carrying this
 *   message).
 *
 * o no-bundle flag - instructs SCTP not to bundle this user data with
 *   other outbound DATA chunks. SCTP MAY still bundle even when
 *   this flag is present, when faced with network congestion.
 *
 * o payload protocol-id - A 32 bit unsigned integer that is to be
 *   passed to the peer indicating the type of payload protocol data
 *   being transmitted. This value is passed as opaque data by SCTP.
 *
 * The return value is the disposition.
 */
enum sctp_disposition sctp_sf_do_prm_send(struct net *net,
					  const struct sctp_endpoint *ep,
					  const struct sctp_association *asoc,
					  const union sctp_subtype type,
					  void *arg,
					  struct sctp_cmd_seq *commands)
{
	/* All of the attributes above have already been folded into the
	 * datamsg by the socket layer; simply queue it for transmission.
	 */
	struct sctp_datamsg *msg = arg;

	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Process the SHUTDOWN primitive.
 *
 * Section: 10.1:
 * C) Shutdown
 *
 * Format: SHUTDOWN(association id)
 * -> result
 *
 * Gracefully closes an association. Any locally queued user data
 * will be delivered to the peer. The association will be terminated only
 * after the peer acknowledges all the SCTP packets sent. A success code
 * will be returned on successful termination of the association. If
 * attempting to terminate the association results in a failure, an error
 * code shall be returned.
 *
 * Mandatory attributes:
 *
 * o association id - local handle to the SCTP association
 *
 * Optional attributes:
 *
 * None.
 *
 * The return value is the disposition.
 */
enum sctp_disposition sctp_sf_do_9_2_prm_shutdown(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	enum sctp_disposition disposition;

	/* From 9.2 Shutdown of an Association
	 * Upon receipt of the SHUTDOWN primitive from its upper
	 * layer, the endpoint enters SHUTDOWN-PENDING state and
	 * remains there until all outstanding data has been
	 * acknowledged by its peer. The endpoint accepts no new data
	 * from its upper layer, but retransmits data to the far end
	 * if necessary to fill gaps.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));

	disposition = SCTP_DISPOSITION_CONSUME;
	/* If nothing is outstanding, the SHUTDOWN exchange can start
	 * right away; otherwise it begins once the outqueue drains.
	 */
	if (sctp_outq_is_empty(&asoc->outqueue)) {
		disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
							    arg, commands);
	}

	return disposition;
}

/*
 * Process the ABORT primitive.
 *
 * Section: 10.1:
 * C) Abort
 *
 * Format: Abort(association id [, cause code])
 * -> result
 *
 * Ungracefully closes an association. Any locally queued user data
 * will be discarded and an ABORT chunk is sent to the peer. A success code
 * will be returned on successful abortion of the association. If
 * attempting to abort the association results in a failure, an error
 * code shall be returned.
 *
 * Mandatory attributes:
 *
 * o association id - local handle to the SCTP association
 *
 * Optional attributes:
 *
 * o cause code - reason of the abort to be passed to the peer
 *
 * None.
 *
 * The return value is the disposition.
 */
enum sctp_disposition sctp_sf_do_9_1_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* From 9.1 Abort of an Association
	 * Upon receipt of the ABORT primitive from its upper layer,
	 * the endpoint enters the CLOSED state and discards all
	 * outstanding data without waiting for acknowledgement from
	 * its peer.
	 */
	struct sctp_chunk *abort = arg;

	/* The ABORT chunk is optional; the association dies either way. */
	if (abort)
		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));

	/* Even if we can't send the ABORT due to low memory delete the
	 * TCB.  This is a departure from our typical NOMEM handling.
	 */

	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
			SCTP_ERROR(ECONNABORTED));
	/* Delete the established association. */
	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_USER_ABORT));

	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);

	return SCTP_DISPOSITION_ABORT;
}

/* We tried an illegal operation on an association which is closed. */
enum sctp_disposition sctp_sf_error_closed(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	/* Report -EINVAL to the socket; nothing else to do on a closed
	 * association.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
	return SCTP_DISPOSITION_CONSUME;
}

/* We tried an illegal operation on an association which is shutting
 * down.
 */
enum sctp_disposition sctp_sf_error_shutdown(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* Report -ESHUTDOWN to the socket; the shutdown itself proceeds
	 * unaffected.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
			SCTP_ERROR(-ESHUTDOWN));
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * sctp_cookie_wait_prm_shutdown
 *
 * Section: 4 Note: 2
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues a shutdown while in COOKIE_WAIT state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* No data can be outstanding yet, so there is nothing to drain:
	 * stop T1-init, go straight to CLOSED and delete the TCB.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);

	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

	return SCTP_DISPOSITION_DELETE_TCB;
}

/*
 * sctp_cookie_echoed_prm_shutdown
 *
 * Section: 4 Note: 2
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues a shutdown while in COOKIE_ECHOED state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_cookie_echoed_prm_shutdown(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* There is a single T1 timer, so we should be able to use
	 * common function with the COOKIE-WAIT state.
	 */
	return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
}

/*
 * sctp_sf_cookie_wait_prm_abort
 *
 * Section: 4 Note: 2
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues an abort while in COOKIE_WAIT state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_cookie_wait_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *abort = arg;

	/* Stop T1-init timer */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

	/* The ABORT chunk is optional; the association dies either way. */
	if (abort)
		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);

	/* Even if we can't send the ABORT due to low memory delete the
	 * TCB.  This is a departure from our typical NOMEM handling.
	 */

	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
			SCTP_ERROR(ECONNREFUSED));
	/* Delete the established association. */
	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
			SCTP_PERR(SCTP_ERROR_USER_ABORT));

	return SCTP_DISPOSITION_ABORT;
}

/*
 * sctp_sf_cookie_echoed_prm_abort
 *
 * Section: 4 Note: 3
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues an abort while in COOKIE_ECHOED state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_cookie_echoed_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* There is a single T1 timer, so we should be able to use
	 * common function with the COOKIE-WAIT state.
	 */
	return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
}

/*
 * sctp_sf_shutdown_pending_prm_abort
 *
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues an abort while in SHUTDOWN-PENDING state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_shutdown_pending_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* Stop the T5-shutdown guard timer. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}

/*
 * sctp_sf_shutdown_sent_prm_abort
 *
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues an abort while in SHUTDOWN-SENT state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_shutdown_sent_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* Stop the T2-shutdown timer. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	/* Stop the T5-shutdown guard timer. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}

/*
 * sctp_sf_shutdown_ack_sent_prm_abort
 *
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but is the route through the
 * state table when someone issues an abort while in SHUTDOWN-ACK-SENT state.
 *
 * Outputs
 * (timers)
 */
enum sctp_disposition sctp_sf_shutdown_ack_sent_prm_abort(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	/* The same T2 timer, so we should be able to use
	 * common function with the SHUTDOWN-SENT state.
	 */
	return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
}

/*
 * Process the REQUESTHEARTBEAT primitive
 *
 * 10.1 ULP-to-SCTP
 * J) Request Heartbeat
 *
 * Format: REQUESTHEARTBEAT(association id, destination transport address)
 *
 * -> result
 *
 * Instructs the local endpoint to perform a HeartBeat on the specified
 * destination transport address of the given association. The returned
 * result should indicate whether the transmission of the HEARTBEAT
 * chunk to the destination address is successful.
 *
 * Mandatory attributes:
 *
 * o association id - local handle to the SCTP association
 *
 * o destination transport address - the transport address of the
 *   association on which a heartbeat should be issued.
 */
enum sctp_disposition sctp_sf_do_prm_requestheartbeat(
					struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const union sctp_subtype type,
					void *arg,
					struct sctp_cmd_seq *commands)
{
	if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
				      (struct sctp_transport *)arg, commands))
		return SCTP_DISPOSITION_NOMEM;

	/*
	 * RFC 2960 (bis), section 8.3
	 *
	 *    D) Request an on-demand HEARTBEAT on a specific destination
	 *    transport address of a given association.
	 *
	 *    The endpoint should increment the respective error counter of
	 *    the destination transport address each time a HEARTBEAT is sent
	 *    to that address and not acknowledged within one RTO.
	 *
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
			SCTP_TRANSPORT(arg));
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * When an endpoint has an ASCONF signaled change to be sent to the
 * remote endpoint it should do A1 to A9
 */
enum sctp_disposition sctp_sf_do_prm_asconf(struct net *net,
					    const struct sctp_endpoint *ep,
					    const struct sctp_association *asoc,
					    const union sctp_subtype type,
					    void *arg,
					    struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	/* Arm T4-rto so an unacknowledged ASCONF is retransmitted. */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
	return SCTP_DISPOSITION_CONSUME;
}

/* RE-CONFIG Section 5.1 RECONF Chunk Procedures */
enum sctp_disposition sctp_sf_do_prm_reconf(struct net *net,
					    const struct sctp_endpoint *ep,
					    const struct sctp_association *asoc,
					    const union sctp_subtype type,
					    void *arg,
					    struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Ignore the primitive event
 *
 * The return value is the disposition of the primitive.
 */
enum sctp_disposition sctp_sf_ignore_primitive(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	pr_debug("%s: primitive type:%d is ignored\n", __func__,
		 type.primitive);

	return SCTP_DISPOSITION_DISCARD;
}

/***************************************************************************
 * These are the state functions for the OTHER events.
 ***************************************************************************/

/*
 * When the SCTP stack has no more user data to send or retransmit, this
 * notification is given to the user. Also, at the time when a user app
 * subscribes to this event, if there is no data to be sent or
 * retransmit, the stack will immediately send up this notification.
 */
enum sctp_disposition sctp_sf_do_no_pending_tsn(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
	if (!event)
		return SCTP_DISPOSITION_NOMEM;

	/* Deliver the SENDER_DRY event to the user via the ULP queue. */
	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Start the shutdown negotiation.
 *
 * From Section 9.2:
 * Once all its outstanding data has been acknowledged, the endpoint
 * shall send a SHUTDOWN chunk to its peer including in the Cumulative
 * TSN Ack field the last sequential TSN it has received from the peer.
 * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
 * state.  If the timer expires, the endpoint must re-send the SHUTDOWN
 * with the updated last sequential TSN received from its peer.
 *
 * The return value is the disposition.
 */
enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *reply;

	/* Once all its outstanding data has been acknowledged, the
	 * endpoint shall send a SHUTDOWN chunk to its peer including
	 * in the Cumulative TSN Ack field the last sequential TSN it
	 * has received from the peer.
	 */
	reply = sctp_make_shutdown(asoc, arg);
	if (!reply)
		goto nomem;

	/* Set the transport for the SHUTDOWN chunk and the timeout for the
	 * T2-shutdown timer.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));

	/* It shall then start the T2-shutdown timer */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	/* RFC 4960 Section 9.2
	 * The sender of the SHUTDOWN MAY also start an overall guard timer
	 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));

	/* Autoclose is moot once shutdown has begun. */
	if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));

	/* and enter the SHUTDOWN-SENT state.  */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));

	/* sctp-implguide 2.10 Issues with Heartbeating and failover
	 *
	 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
	 * or SHUTDOWN-ACK.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Generate a SHUTDOWN ACK now that everything is SACK'd.
 *
 * From Section 9.2:
 *
 * If it has no more outstanding DATA chunks, the SHUTDOWN receiver
 * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
 * entering the SHUTDOWN-ACK-SENT state.  If the timer expires, the
 * endpoint must re-send the SHUTDOWN ACK.
 *
 * The return value is the disposition.
 */
enum sctp_disposition sctp_sf_do_9_2_shutdown_ack(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = arg;
	struct sctp_chunk *reply;

	/* There are 2 ways of getting here:
	 *    1) called in response to a SHUTDOWN chunk
	 *    2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
	 *
	 * For the case (2), the arg parameter is set to NULL.  We need
	 * to check that we have a chunk before accessing it's fields.
	 */
	if (chunk) {
		if (!sctp_vtag_verify(chunk, asoc))
			return sctp_sf_pdiscard(net, ep, asoc, type, arg,
						commands);

		/* Make sure that the SHUTDOWN chunk has a valid length. */
		if (!sctp_chunk_length_valid(
				chunk, sizeof(struct sctp_shutdown_chunk)))
			return sctp_sf_violation_chunklen(net, ep, asoc, type,
							  arg, commands);
	}

	/* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
	 * shall send a SHUTDOWN ACK ...
	 */
	reply = sctp_make_shutdown_ack(asoc, chunk);
	if (!reply)
		goto nomem;

	/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
	 * the T2-shutdown timer.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));

	/* and start/restart a T2-shutdown timer of its own, */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	/* Autoclose is moot once shutdown has begun. */
	if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));

	/* Enter the SHUTDOWN-ACK-SENT state.  */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));

	/* sctp-implguide 2.10 Issues with Heartbeating and failover
	 *
	 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
	 * or SHUTDOWN-ACK.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * Ignore the event defined as other
 *
 * The return value is the disposition of the event.
 */
enum sctp_disposition sctp_sf_ignore_other(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	pr_debug("%s: the event other type:%d is ignored\n",
		 __func__, type.other);

	return SCTP_DISPOSITION_DISCARD;
}

/************************************************************
 * These are the state functions for handling timeout events.
 ************************************************************/

/*
 * RTX Timeout
 *
 * Section: 6.3.3 Handle T3-rtx Expiration
 *
 * Whenever the retransmission timer T3-rtx expires for a destination
 * address, do the following:
 * [See below]
 *
 * arg carries the struct sctp_transport whose T3-rtx timer fired.
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_do_6_3_3_rtx(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	struct sctp_transport *transport = arg;

	SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);

	if (asoc->overall_error_count >= asoc->max_retrans) {
		if (asoc->peer.zero_window_announced &&
		    asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
			/*
			 * We are here likely because the receiver had its rwnd
			 * closed for a while and we have not been able to
			 * transmit the locally queued data within the maximum
			 * retransmission attempts limit.  Start the T5
			 * shutdown guard timer to give the receiver one last
			 * chance and some additional time to recover before
			 * aborting.
			 */
			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
		} else {
			/* Retransmission limit exceeded: tear the
			 * association down and report ETIMEDOUT.
			 */
			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
					SCTP_ERROR(ETIMEDOUT));
			/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
					SCTP_PERR(SCTP_ERROR_NO_ERROR));
			SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
			SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
			return SCTP_DISPOSITION_DELETE_TCB;
		}
	}

	/* E1) For the destination address for which the timer
	 * expires, adjust its ssthresh with rules defined in Section
	 * 7.2.3 and set the cwnd <- MTU.
	 */

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 */

	/* E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this
	 * value K.  Bundle and retransmit those K DATA chunks in a
	 * single packet to the destination endpoint.
	 *
	 * Note: Any DATA chunks that were sent to the address for
	 * which the T3-rtx timer expired but did not fit in one MTU
	 * (rule E3 above), should be marked for retransmission and
	 * sent as soon as cwnd allows (normally when a SACK arrives).
	 */

	/* Do some failure management (Section 8.2).  E1 and E2 are handled
	 * as part of the strike side effect.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));

	/* NB: Rules E4 and F1 are implicit in R1.  */
	sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * Generate delayed SACK on timeout
 *
 * Section: 6.2 Acknowledgement on Reception of DATA Chunks
 *
 * The guidelines on delayed acknowledgement algorithm specified in
 * Section  4.2 of [RFC2581] SHOULD be followed.  Specifically, an
 * acknowledgement SHOULD be generated for at least every second packet
 * (not every second DATA chunk) received, and SHOULD be generated
 * within 200 ms of the arrival of any unacknowledged DATA chunk.
 * In some situations it may be beneficial for an SCTP transmitter to be
 * more conservative than the algorithms detailed in this document
 * allow. However, an SCTP transmitter MUST NOT be more aggressive than
 * the following algorithms allow.
 */
enum sctp_disposition sctp_sf_do_6_2_sack(struct net *net,
					  const struct sctp_endpoint *ep,
					  const struct sctp_association *asoc,
					  const union sctp_subtype type,
					  void *arg,
					  struct sctp_cmd_seq *commands)
{
	/* Delayed-SACK timer fired: force a SACK out now. */
	SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
	return SCTP_DISPOSITION_CONSUME;
}

/*
 * sctp_sf_t1_init_timer_expire
 *
 * Section: 4 Note: 2
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * RFC 2960 Section 4 Notes
 * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
 *    and re-start the T1-init timer without changing state.  This MUST
 *    be repeated up to 'Max.Init.Retransmits' times.  After that, the
 *    endpoint MUST abort the initialization process and report the
 *    error to SCTP user.
 *
 * Outputs
 * (timers, events)
 *
 */
enum sctp_disposition sctp_sf_t1_init_timer_expire(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	int attempts = asoc->init_err_counter + 1;
	struct sctp_chunk *repl = NULL;
	struct sctp_bind_addr *bp;

	pr_debug("%s: timer T1 expired (INIT)\n", __func__);

	SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);

	if (attempts <= asoc->max_init_attempts) {
		/* Rebuild the INIT from the endpoint's bound addresses and
		 * retransmit it without changing state.
		 */
		bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
		repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
		if (!repl)
			return SCTP_DISPOSITION_NOMEM;

		/* Choose transport for INIT. */
		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
				SCTP_CHUNK(repl));

		/* Issue a sideeffect to do the needed accounting. */
		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
	} else {
		/* Max.Init.Retransmits exceeded: abort initialization and
		 * report the failure to the SCTP user.
		 */
		pr_debug("%s: giving up on INIT, attempts:%d "
			 "max_init_attempts:%d\n", __func__, attempts,
			 asoc->max_init_attempts);

		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	return SCTP_DISPOSITION_CONSUME;
}

/*
 * sctp_sf_t1_cookie_timer_expire
 *
 * Section: 4 Note: 2
 * Verification Tag:
 * Inputs
 * (endpoint, asoc)
 *
 * RFC 2960 Section 4 Notes
 * 3) If the T1-cookie timer expires, the endpoint MUST retransmit
 *    COOKIE ECHO and re-start the T1-cookie timer without changing
 *    state.  This MUST be repeated up to 'Max.Init.Retransmits' times.
 *    After that, the endpoint MUST abort the initialization process and
 *    report the error to SCTP user.
5953 * 5954 * Outputs 5955 * (timers, events) 5956 * 5957 */ 5958 enum sctp_disposition sctp_sf_t1_cookie_timer_expire( 5959 struct net *net, 5960 const struct sctp_endpoint *ep, 5961 const struct sctp_association *asoc, 5962 const union sctp_subtype type, 5963 void *arg, 5964 struct sctp_cmd_seq *commands) 5965 { 5966 int attempts = asoc->init_err_counter + 1; 5967 struct sctp_chunk *repl = NULL; 5968 5969 pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__); 5970 5971 SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS); 5972 5973 if (attempts <= asoc->max_init_attempts) { 5974 repl = sctp_make_cookie_echo(asoc, NULL); 5975 if (!repl) 5976 return SCTP_DISPOSITION_NOMEM; 5977 5978 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, 5979 SCTP_CHUNK(repl)); 5980 /* Issue a sideeffect to do the needed accounting. */ 5981 sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART, 5982 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); 5983 5984 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 5985 } else { 5986 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5987 SCTP_ERROR(ETIMEDOUT)); 5988 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 5989 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5990 return SCTP_DISPOSITION_DELETE_TCB; 5991 } 5992 5993 return SCTP_DISPOSITION_CONSUME; 5994 } 5995 5996 /* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN 5997 * with the updated last sequential TSN received from its peer. 5998 * 5999 * An endpoint should limit the number of retransmission of the 6000 * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'. 6001 * If this threshold is exceeded the endpoint should destroy the TCB and 6002 * MUST report the peer endpoint unreachable to the upper layer (and 6003 * thus the association enters the CLOSED state). The reception of any 6004 * packet from its peer (i.e. 
 * as the peer sends all of its queued DATA
 * chunks) should clear the endpoint's retransmission count and restart
 * the T2-Shutdown timer, giving its peer ample opportunity to transmit
 * all of its queued DATA chunks that have not yet been sent.
 */
enum sctp_disposition sctp_sf_t2_timer_expire(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *reply = NULL;

	pr_debug("%s: timer T2 expired\n", __func__);

	SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);

	/* The state-function signature passes asoc as const, but the retry
	 * counter is bookkeeping we must still update; hence the cast.
	 */
	((struct sctp_association *)asoc)->shutdown_retries++;

	if (asoc->overall_error_count >= asoc->max_retrans) {
		/* Association.Max.Retrans exceeded: destroy the TCB and
		 * report the peer unreachable.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	/* T2 runs only in the two shutdown states; re-send whichever
	 * chunk the current state calls for.
	 */
	switch (asoc->state) {
	case SCTP_STATE_SHUTDOWN_SENT:
		reply = sctp_make_shutdown(asoc, NULL);
		break;

	case SCTP_STATE_SHUTDOWN_ACK_SENT:
		reply = sctp_make_shutdown_ack(asoc, NULL);
		break;

	default:
		BUG();
		break;
	}

	if (!reply)
		goto nomem;

	/* Do some failure management (Section 8.2).
	 * If we remove the transport an SHUTDOWN was last sent to, don't
	 * do failure management.
	 */
	if (asoc->shutdown_last_sent_to)
		sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
				SCTP_TRANSPORT(asoc->shutdown_last_sent_to));

	/* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
	 * the T2-shutdown timer.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));

	/* Restart the T2-shutdown timer.  */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	return SCTP_DISPOSITION_CONSUME;

nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * If the T4 RTO timer expires the endpoint should do B1 to B5
 */
enum sctp_disposition sctp_sf_t4_timer_expire(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *chunk = asoc->addip_last_asconf;
	struct sctp_transport *transport = chunk->transport;

	SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);

	/* ADDIP 4.1 B1) Increment the error counters and perform path failure
	 * detection on the appropriate destination address as defined in
	 * RFC2960 [5] section 8.1 and 8.2.
	 */
	if (transport)
		sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
				SCTP_TRANSPORT(transport));

	/* Reconfig T4 timer and transport. */
	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));

	/* ADDIP 4.1 B2) Increment the association error counters and perform
	 * endpoint failure detection on the association as defined in
	 * RFC2960 [5] section 8.1 and 8.2.
	 * association error counter is incremented in SCTP_CMD_STRIKE.
	 */
	if (asoc->overall_error_count >= asoc->max_retrans) {
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_ABORT;
	}

	/* ADDIP 4.1 B3) Back-off the destination address RTO value to which
	 * the ASCONF chunk was sent by doubling the RTO timer value.
	 * This is done in SCTP_CMD_STRIKE.
	 */

	/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
	 * choose an alternate destination address (please refer to RFC2960
	 * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
	 * chunk, it MUST be the same (including its serial number) as the last
	 * ASCONF sent.
	 *
	 * Hold an extra ref: the chunk is queued for (re)transmission while
	 * asoc->addip_last_asconf keeps its own reference.
	 */
	sctp_chunk_hold(asoc->addip_last_asconf);
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
			SCTP_CHUNK(asoc->addip_last_asconf));

	/* ADDIP 4.1 B5) Restart the T-4 RTO timer.  Note that if a different
	 * destination is selected, then the RTO used will be that of the new
	 * destination address.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));

	return SCTP_DISPOSITION_CONSUME;
}

/* sctpimpguide-05 Section 2.12.2
 * The sender of the SHUTDOWN MAY also start an overall guard timer
 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
 * At the expiration of this timer the sender SHOULD abort the association
 * by sending an ABORT chunk.
 */
enum sctp_disposition sctp_sf_t5_timer_expire(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	struct sctp_chunk *reply = NULL;

	pr_debug("%s: timer T5 expired\n", __func__);

	SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);

	/* Shutdown guard expired: abort the association outright. */
	reply = sctp_make_abort(asoc, NULL, 0);
	if (!reply)
		goto nomem;

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
			SCTP_ERROR(ETIMEDOUT));
	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_NO_ERROR));

	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);

	return SCTP_DISPOSITION_DELETE_TCB;
nomem:
	return SCTP_DISPOSITION_NOMEM;
}

/* Handle expiration of AUTOCLOSE timer.  When the autoclose timer expires,
 * the association is automatically closed by starting the shutdown process.
 * The work that needs to be done is same as when SHUTDOWN is initiated by
 * the user.  So this routine looks same as sctp_sf_do_9_2_prm_shutdown().
 */
enum sctp_disposition sctp_sf_autoclose_timer_expire(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	const union sctp_subtype type,
	void *arg,
	struct sctp_cmd_seq *commands)
{
	enum sctp_disposition disposition;

	SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);

	/* From 9.2 Shutdown of an Association
	 * Upon receipt of the SHUTDOWN primitive from its upper
	 * layer, the endpoint enters SHUTDOWN-PENDING state and
	 * remains there until all outstanding data has been
	 * acknowledged by its peer. The endpoint accepts no new data
	 * from its upper layer, but retransmits data to the far end
	 * if necessary to fill gaps.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));

	disposition = SCTP_DISPOSITION_CONSUME;
	/* If nothing is outstanding, move straight on to sending SHUTDOWN;
	 * note the NULL chunk argument.
	 */
	if (sctp_outq_is_empty(&asoc->outqueue)) {
		disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
							    NULL, commands);
	}

	return disposition;
}

/*****************************************************************************
 * These are sa state functions which could apply to all types of events.
 ****************************************************************************/

/*
 * This table entry is not implemented.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_not_impl(struct net *net,
				       const struct sctp_endpoint *ep,
				       const struct sctp_association *asoc,
				       const union sctp_subtype type,
				       void *arg, struct sctp_cmd_seq *commands)
{
	return SCTP_DISPOSITION_NOT_IMPL;
}

/*
 * This table entry represents a bug.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_bug(struct net *net,
				  const struct sctp_endpoint *ep,
				  const struct sctp_association *asoc,
				  const union sctp_subtype type,
				  void *arg, struct sctp_cmd_seq *commands)
{
	return SCTP_DISPOSITION_BUG;
}

/*
 * This table entry represents the firing of a timer in the wrong state.
 * Since timer deletion cannot be guaranteed a timer 'may' end up firing
 * when the association is in the wrong state.   This event should
 * be ignored, so as to prevent any rearming of the timer.
 *
 * Inputs
 * (endpoint, asoc, chunk)
 *
 * The return value is the disposition of the chunk.
 */
enum sctp_disposition sctp_sf_timer_ignore(struct net *net,
					   const struct sctp_endpoint *ep,
					   const struct sctp_association *asoc,
					   const union sctp_subtype type,
					   void *arg,
					   struct sctp_cmd_seq *commands)
{
	pr_debug("%s: timer %d ignored\n", __func__, type.chunk);

	return SCTP_DISPOSITION_CONSUME;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Pull the SACK chunk based on the SACK header.
 * Returns NULL (without consuming anything) if the advertised gap-ack
 * blocks and duplicate TSNs would run past the end of the skb.
 */
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
{
	struct sctp_sackhdr *sack;
	__u16 num_dup_tsns;
	unsigned int len;
	__u16 num_blocks;

	/* Protect ourselves from reading too far into
	 * the skb from a bogus sender.
	 */
	sack = (struct sctp_sackhdr *) chunk->skb->data;

	/* Each gap-ack block and each duplicate TSN is one 32-bit word. */
	num_blocks = ntohs(sack->num_gap_ack_blocks);
	num_dup_tsns = ntohs(sack->num_dup_tsns);
	len = sizeof(struct sctp_sackhdr);
	len += (num_blocks + num_dup_tsns) * sizeof(__u32);
	if (len > chunk->skb->len)
		return NULL;

	skb_pull(chunk->skb, len);

	return sack;
}

/* Create an ABORT packet to be sent as a response, with the specified
 * error causes.
 */
static struct sctp_packet *sctp_abort_pkt_new(
	struct net *net,
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	struct sctp_chunk *chunk,
	const void *payload, size_t paylen)
{
	struct sctp_packet *packet;
	struct sctp_chunk *abort;

	packet = sctp_ootb_pkt_new(net, asoc, chunk);

	if (packet) {
		/* Make an ABORT.
		 * The T bit will be set if the asoc is NULL.
		 */
		abort = sctp_make_abort(asoc, chunk, paylen);
		if (!abort) {
			sctp_ootb_pkt_free(packet);
			return NULL;
		}

		/* Reflect vtag if T-Bit is set */
		if (sctp_test_T_bit(abort))
			packet->vtag = ntohl(chunk->sctp_hdr->vtag);

		/* Add specified error causes, i.e., payload, to the
		 * end of the chunk.
		 */
		sctp_addto_chunk(abort, paylen, payload);

		/* Set the skb to the belonging sock for accounting.  */
		abort->skb->sk = ep->base.sk;

		sctp_packet_append_chunk(packet, abort);

	}

	return packet;
}

/* Allocate a packet for responding in the OOTB conditions.
 * Returns NULL if the transport (and hence the packet) cannot be
 * allocated.
 */
static struct sctp_packet *sctp_ootb_pkt_new(
	struct net *net,
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	struct sctp_transport *transport;
	struct sctp_packet *packet;
	__u16 sport, dport;
	__u32 vtag;

	/* Get the source and destination port from the inbound packet.
	 * They are swapped because we are replying to the sender.
	 */
	sport = ntohs(chunk->sctp_hdr->dest);
	dport = ntohs(chunk->sctp_hdr->source);

	/* The V-tag is going to be the same as the inbound packet if no
	 * association exists, otherwise, use the peer's vtag.
	 */
	if (asoc) {
		/* Special case the INIT-ACK as there is no peer's vtag
		 * yet.
		 */
		switch (chunk->chunk_hdr->type) {
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		{
			struct sctp_initack_chunk *initack;

			initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
			vtag = ntohl(initack->init_hdr.init_tag);
			break;
		}
		default:
			vtag = asoc->peer.i.init_tag;
			break;
		}
	} else {
		/* Special case the INIT and stale COOKIE_ECHO as there is no
		 * vtag yet.
		 */
		switch (chunk->chunk_hdr->type) {
		case SCTP_CID_INIT:
		{
			struct sctp_init_chunk *init;

			init = (struct sctp_init_chunk *)chunk->chunk_hdr;
			vtag = ntohl(init->init_hdr.init_tag);
			break;
		}
		default:
			vtag = ntohl(chunk->sctp_hdr->vtag);
			break;
		}
	}

	/* Make a transport for the bucket, Eliza... */
	transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
	if (!transport)
		goto nomem;

	/* Mirror the UDP encapsulation port of the inbound packet. */
	transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;

	/* Cache a route for the transport with the chunk's destination as
	 * the source address.
	 */
	sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
			     sctp_sk(net->sctp.ctl_sock));

	packet = &transport->packet;
	sctp_packet_init(packet, transport, sport, dport);
	sctp_packet_config(packet, vtag, 0);

	return packet;

nomem:
	return NULL;
}

/* Free the packet allocated earlier for responding in the OOTB condition.
 * The packet is embedded in the transport, so freeing the transport
 * releases it.
 */
void sctp_ootb_pkt_free(struct sctp_packet *packet)
{
	sctp_transport_free(packet->transport);
}

/* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */
static void sctp_send_stale_cookie_err(struct net *net,
				       const struct sctp_endpoint *ep,
				       const struct sctp_association *asoc,
				       const struct sctp_chunk *chunk,
				       struct sctp_cmd_seq *commands,
				       struct sctp_chunk *err_chunk)
{
	struct sctp_packet *packet;

	if (err_chunk) {
		packet = sctp_ootb_pkt_new(net, asoc, chunk);
		if (packet) {
			struct sctp_signed_cookie *cookie;

			/* Override the OOTB vtag from the cookie. */
			cookie = chunk->subh.cookie_hdr;
			packet->vtag = cookie->c.peer_vtag;

			/* Set the skb to the belonging sock for accounting. */
			err_chunk->skb->sk = ep->base.sk;
			sctp_packet_append_chunk(packet, err_chunk);
			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
					SCTP_PACKET(packet));
			SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
		} else
			/* No packet: the error chunk cannot be sent, drop it. */
			sctp_chunk_free (err_chunk);
	}
}


/* Process a data chunk.
 *
 * Validates and accounts for a received DATA chunk, queueing side
 * effects (SACK generation, delivery, renege, abort) on @commands.
 * Returns one of the SCTP_IERROR_* codes used below:
 *   SCTP_IERROR_NO_ERROR        - chunk accepted for delivery
 *   SCTP_IERROR_HIGH_TSN        - TSN beyond the map; silently discarded
 *   SCTP_IERROR_DUP_TSN         - duplicate TSN; reported, discarded
 *   SCTP_IERROR_IGNORE_TSN      - no rwnd/buffer room; discarded
 *   SCTP_IERROR_NO_DATA         - empty DATA chunk; association aborted
 *   SCTP_IERROR_BAD_STREAM      - invalid stream id; chunk dropped
 *   SCTP_IERROR_PROTO_VIOLATION - SSN/MID validation failed
 */
static int sctp_eat_data(const struct sctp_association *asoc,
			 struct sctp_chunk *chunk,
			 struct sctp_cmd_seq *commands)
{
	/* Cast away const: the tsn map is queried (and its gap state read)
	 * through helpers that take a non-const map.
	 */
	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	struct sctp_datahdr *data_hdr;
	struct sctp_chunk *err;
	enum sctp_verb deliver;
	size_t datalen;
	__u32 tsn;
	int tmp;

	data_hdr = (struct sctp_datahdr *)chunk->skb->data;
	chunk->subh.data_hdr = data_hdr;
	skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));

	tsn = ntohl(data_hdr->tsn);
	pr_debug("%s: TSN 0x%x\n", __func__, tsn);

	/* ASSERT:  Now skb->data is really the user data.  */

	/* Process ECN based congestion.
	 *
	 * Since the chunk structure is reused for all chunks within
	 * a packet, we use ecn_ce_done to track if we've already
	 * done CE processing for this packet.
	 *
	 * We need to do ECN processing even if we plan to discard the
	 * chunk later.
	 */

	if (asoc->peer.ecn_capable && !chunk->ecn_ce_done) {
		struct sctp_af *af = SCTP_INPUT_CB(chunk->skb)->af;
		chunk->ecn_ce_done = 1;

		if (af->is_ce(sctp_gso_headskb(chunk->skb))) {
			/* Do real work as side effect. */
			sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
					SCTP_U32(tsn));
		}
	}

	tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
	if (tmp < 0) {
		/* The TSN is too high--silently discard the chunk and
		 * count on it getting retransmitted later.
		 */
		if (chunk->asoc)
			chunk->asoc->stats.outofseqtsns++;
		return SCTP_IERROR_HIGH_TSN;
	} else if (tmp > 0) {
		/* This is a duplicate.  Record it.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
		return SCTP_IERROR_DUP_TSN;
	}

	/* This is a new TSN.  */

	/* Discard if there is no room in the receive window.
	 * Actually, allow a little bit of overflow (up to a MTU).
	 */
	datalen = ntohs(chunk->chunk_hdr->length);
	datalen -= sctp_datachk_len(&asoc->stream);

	deliver = SCTP_CMD_CHUNK_ULP;

	/* Think about partial delivery. */
	if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {

		/* Even if we don't accept this chunk there is
		 * memory pressure.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
	}

	/* Spill over rwnd a little bit.  Note: While allowed, this spill over
	 * seems a bit troublesome in that frag_point varies based on
	 * PMTU.  In cases, such as loopback, this might be a rather
	 * large spill over.
	 */
	if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
	    (datalen > asoc->rwnd + asoc->frag_point))) {

		/* If this is the next TSN, consider reneging to make
		 * room.   Note: Playing nice with a confused sender.  A
		 * malicious sender can still eat up all our buffer
		 * space and in the future we may want to detect and
		 * do more drastic reneging.
		 */
		if (sctp_tsnmap_has_gap(map) &&
		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
			pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);
			deliver = SCTP_CMD_RENEGE;
		} else {
			pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",
				 __func__, tsn, datalen, asoc->rwnd);

			return SCTP_IERROR_IGNORE_TSN;
		}
	}

	/*
	 * Also try to renege to limit our memory usage in the event that
	 * we are under memory pressure
	 * If we can't renege, don't worry about it, the sk_rmem_schedule
	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
	 * memory usage too much
	 */
	if (sk_under_memory_pressure(sk)) {
		if (sctp_tsnmap_has_gap(map) &&
		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
			pr_debug("%s: under pressure, reneging for tsn:%u\n",
				 __func__, tsn);
			deliver = SCTP_CMD_RENEGE;
		} else {
			sk_mem_reclaim(sk);
		}
	}

	/*
	 * Section 3.3.10.9 No User Data (9)
	 *
	 * Cause of error
	 * ---------------
	 * No User Data:  This error cause is returned to the originator of a
	 * DATA chunk if a received DATA chunk has no user data.
	 */
	if (unlikely(0 == datalen)) {
		err = sctp_make_abort_no_data(asoc, chunk, tsn);
		if (err) {
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(err));
		}
		/* We are going to ABORT, so we might as well stop
		 * processing the rest of the chunks in the packet.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ECONNABORTED));
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_DATA));
		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
		return SCTP_IERROR_NO_DATA;
	}

	chunk->data_accepted = 1;

	/* Note: Some chunks may get overcounted (if we drop) or overcounted
	 * if we renege and the chunk arrives again.
	 */
	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
		SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
		if (chunk->asoc)
			chunk->asoc->stats.iuodchunks++;
	} else {
		SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
		if (chunk->asoc)
			chunk->asoc->stats.iodchunks++;
	}

	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
	 *
	 * If an endpoint receive a DATA chunk with an invalid stream
	 * identifier, it shall acknowledge the reception of the DATA chunk
	 * following the normal procedure, immediately send an ERROR chunk
	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
	 * and discard the DATA chunk.
	 */
	if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
		/* Mark tsn as received even though we drop it */
		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));

		err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
					 &data_hdr->stream,
					 sizeof(data_hdr->stream),
					 sizeof(u16));
		if (err)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(err));
		return SCTP_IERROR_BAD_STREAM;
	}

	/* Check to see if the SSN is possible for this TSN.
	 * The biggest gap we can record is 4K wide.  Since SSNs wrap
	 * at an unsigned short, there is no way that an SSN can
	 * wrap and for a valid TSN.  We can simply check if the current
	 * SSN is smaller then the next expected one.  If it is, it wrapped
	 * and is invalid.
	 */
	if (!asoc->stream.si->validate_data(chunk))
		return SCTP_IERROR_PROTO_VIOLATION;

	/* Send the data up to the user.  Note:  Schedule  the
	 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
	 * chunk needs the updated rwnd.
	 */
	sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));

	return SCTP_IERROR_NO_ERROR;
}