/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving, mostly
 * covering I-DATA and I-FORWARD-TSN chunk processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}
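
/* Assign a Message Identifier (MID) to every fragment of the message that
 * @chunk belongs to, and a Fragment Sequence Number (FSN) to each non-first
 * fragment; the first fragment carries the PPID instead, since the I-DATA
 * header (RFC 8260) keeps PPID and FSN in a union.  Whether the ordered or
 * the unordered MID counter of the outgoing stream is consumed depends on
 * the SCTP_DATA_UNORDERED flag, and the counter is only advanced once the
 * last fragment is numbered.
 */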
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	const struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}
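
/* Insert @event into the ordered reassembly queue, keeping it sorted by
 * stream id, then MID, then FSN (with a first fragment sorting ahead of the
 * later fragments of the same message).  The common in-sequence case is
 * handled up front by appending after the current tail.
 */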
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}
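
/* Reassembly entry point for ordered I-DATA: deliver unfragmented messages
 * as-is, otherwise queue the fragment and either continue an in-progress
 * partial delivery on this stream or try to pull out a complete (or
 * partially deliverable) message.
 */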
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream)
			break;

		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid))
			break;
	}

	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}
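
/* Deliver an event (or the whole list of reassembled events hanging off
 * skb->prev) to the socket receive queue.  Events are dropped when the
 * socket has been shut down for reading or the event type has not been
 * subscribed to; otherwise the reader is woken if it has not already been
 * signalled.  Returns 1 on delivery, 0 if the event was freed.
 */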
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *skb_list;

	skb_list = (struct sk_buff_head *)skb->prev;

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}
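
/* Unordered counterpart of sctp_intl_retrieve_reassembled(): walk the
 * unordered reassembly queue for the message named by @event and either
 * return it fully reassembled or, once enough bytes are queued to reach the
 * partial-delivery point, start unordered partial delivery on the stream.
 */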
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}
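
/* Main receive path for I-DATA chunks: build a ulpevent from the chunk,
 * pull MID and PPID/FSN out of the I-DATA header, run it through unordered
 * or ordered reassembly (and in-stream ordering), then enqueue whatever has
 * become deliverable.  Returns 1 if a message boundary (MSG_EOR) was
 * delivered, 0 if only a partial message or nothing was delivered, and
 * -ENOMEM if the event could not be allocated.
 */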
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event && event->msg_flags & MSG_EOR) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, event);
	}

	return event_eor;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}
}

static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}
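
/* Queue an SCTP_PARTIAL_DELIVERY_EVENT with SCTP_PARTIAL_DELIVERY_ABORTED
 * for stream @sid / message @mid directly on the receive queue, provided the
 * application has subscribed to partial delivery events, and wake the
 * socket reader if needed.
 */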
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sctp_sk(sk)->subscribe))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sctp_sk(sk)->data_ready_signalled) {
			sctp_sk(sk)->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}

static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

#define SCTP_FTSN_U_BIT	0x1
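
/* Build an I-FORWARD-TSN chunk for abandoned chunks at or just beyond @ctsn:
 * advance the Advanced.Peer.Ack.Point, collect up to ten (stream, flags) ->
 * MID skip entries, and queue the resulting chunk on the outqueue's control
 * chunk list.
 */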
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}
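
/* Process a validated I-FORWARD-TSN: advance the Cumulative TSN Ack point,
 * flush now-stale fragments from both reassembly queues, and abort all
 * partial delivery only when the skip covers every TSN seen so far.
 */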
static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= sctp_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};
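
/* Pick the operation table for this association: sctp_stream_interleave_1
 * (I-DATA/I-FORWARD-TSN) when user message interleaving was negotiated,
 * otherwise the original DATA/FORWARD-TSN callbacks in
 * sctp_stream_interleave_0.
 */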
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}