/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}
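
/* Illustrative sketch (not part of the original logic): the queue
 * helpers above and below maintain the invariant that q->out_qlen
 * always equals the sum of ch->skb->len over every chunk on
 * out_chunk_list.  Assuming a hypothetical, already-built chunk 'ch':
 *
 *	sctp_outq_tail_data(q, ch);		(enqueue at the tail)
 *	ch = sctp_outq_dequeue_data(q);		(FIFO: the same chunk back)
 *	sctp_outq_head_data(q, ch);		(requeue at the head)
 *
 * out_qlen is unchanged after the three calls; the helpers differ only
 * in which end of out_chunk_list they touch.
 */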
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
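
/* Worked example of the skip decision above (invented values): with a
 * changeover active and CYCLING_CHANGEOVER == 0, a TSN t sent to a
 * non-primary transport while count_of_newacks == 2 matches step D, so
 * sctp_cacc_skip() returns 1 and t's missing report count is left
 * alone.  With count_of_newacks == 1 and cacc_saw_newack set on t's
 * transport, neither D nor F applies, both 3.1 and 3.2 return 0, and
 * the count is incremented as usual in sctp_mark_missing().
 */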
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->empty = 1;
}

/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}

/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
313 */ 314 switch (q->asoc->state) { 315 case SCTP_STATE_CLOSED: 316 case SCTP_STATE_SHUTDOWN_PENDING: 317 case SCTP_STATE_SHUTDOWN_SENT: 318 case SCTP_STATE_SHUTDOWN_RECEIVED: 319 case SCTP_STATE_SHUTDOWN_ACK_SENT: 320 /* Cannot send after transport endpoint shutdown */ 321 error = -ESHUTDOWN; 322 break; 323 324 default: 325 pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n", 326 __func__, q, chunk, chunk && chunk->chunk_hdr ? 327 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 328 "illegal chunk"); 329 330 sctp_outq_tail_data(q, chunk); 331 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 332 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); 333 else 334 SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); 335 q->empty = 0; 336 break; 337 } 338 } else { 339 list_add_tail(&chunk->list, &q->control_chunk_list); 340 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); 341 } 342 343 if (error < 0) 344 return error; 345 346 if (!q->cork) 347 error = sctp_outq_flush(q, 0); 348 349 return error; 350 } 351 352 /* Insert a chunk into the sorted list based on the TSNs. The retransmit list 353 * and the abandoned list are in ascending order. 354 */ 355 static void sctp_insert_list(struct list_head *head, struct list_head *new) 356 { 357 struct list_head *pos; 358 struct sctp_chunk *nchunk, *lchunk; 359 __u32 ntsn, ltsn; 360 int done = 0; 361 362 nchunk = list_entry(new, struct sctp_chunk, transmitted_list); 363 ntsn = ntohl(nchunk->subh.data_hdr->tsn); 364 365 list_for_each(pos, head) { 366 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); 367 ltsn = ntohl(lchunk->subh.data_hdr->tsn); 368 if (TSN_lt(ntsn, ltsn)) { 369 list_add(new, pos->prev); 370 done = 1; 371 break; 372 } 373 } 374 if (!done) 375 list_add_tail(new, head); 376 } 377 378 /* Mark all the eligible packets on a transport for retransmission. */ 379 void sctp_retransmit_mark(struct sctp_outq *q, 380 struct sctp_transport *transport, 381 __u8 reason) 382 { 383 struct list_head *lchunk, *ltemp; 384 struct sctp_chunk *chunk; 385 386 /* Walk through the specified transmitted queue. */ 387 list_for_each_safe(lchunk, ltemp, &transport->transmitted) { 388 chunk = list_entry(lchunk, struct sctp_chunk, 389 transmitted_list); 390 391 /* If the chunk is abandoned, move it to abandoned list. */ 392 if (sctp_chunk_abandoned(chunk)) { 393 list_del_init(lchunk); 394 sctp_insert_list(&q->abandoned, lchunk); 395 396 /* If this chunk has not been previousely acked, 397 * stop considering it 'outstanding'. Our peer 398 * will most likely never see it since it will 399 * not be retransmitted 400 */ 401 if (!chunk->tsn_gap_acked) { 402 if (chunk->transport) 403 chunk->transport->flight_size -= 404 sctp_data_size(chunk); 405 q->outstanding_bytes -= sctp_data_size(chunk); 406 q->asoc->peer.rwnd += sctp_data_size(chunk); 407 } 408 continue; 409 } 410 411 /* If we are doing retransmission due to a timeout or pmtu 412 * discovery, only the chunks that are not yet acked should 413 * be added to the retransmit queue. 414 */ 415 if ((reason == SCTP_RTXR_FAST_RTX && 416 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 417 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 418 /* RFC 2960 6.2.1 Processing a Received SACK 419 * 420 * C) Any time a DATA chunk is marked for 421 * retransmission (via either T3-rtx timer expiration 422 * (Section 6.3.3) or via fast retransmit 423 * (Section 7.2.4)), add the data size of those 424 * chunks to the rwnd. 
425 */ 426 q->asoc->peer.rwnd += sctp_data_size(chunk); 427 q->outstanding_bytes -= sctp_data_size(chunk); 428 if (chunk->transport) 429 transport->flight_size -= sctp_data_size(chunk); 430 431 /* sctpimpguide-05 Section 2.8.2 432 * M5) If a T3-rtx timer expires, the 433 * 'TSN.Missing.Report' of all affected TSNs is set 434 * to 0. 435 */ 436 chunk->tsn_missing_report = 0; 437 438 /* If a chunk that is being used for RTT measurement 439 * has to be retransmitted, we cannot use this chunk 440 * anymore for RTT measurements. Reset rto_pending so 441 * that a new RTT measurement is started when a new 442 * data chunk is sent. 443 */ 444 if (chunk->rtt_in_progress) { 445 chunk->rtt_in_progress = 0; 446 transport->rto_pending = 0; 447 } 448 449 /* Move the chunk to the retransmit queue. The chunks 450 * on the retransmit queue are always kept in order. 451 */ 452 list_del_init(lchunk); 453 sctp_insert_list(&q->retransmit, lchunk); 454 } 455 } 456 457 pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, " 458 "flight_size:%d, pba:%d\n", __func__, transport, reason, 459 transport->cwnd, transport->ssthresh, transport->flight_size, 460 transport->partial_bytes_acked); 461 } 462 463 /* Mark all the eligible packets on a transport for retransmission and force 464 * one packet out. 465 */ 466 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 467 sctp_retransmit_reason_t reason) 468 { 469 struct net *net = sock_net(q->asoc->base.sk); 470 int error = 0; 471 472 switch(reason) { 473 case SCTP_RTXR_T3_RTX: 474 SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS); 475 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); 476 /* Update the retran path if the T3-rtx timer has expired for 477 * the current retran path. 478 */ 479 if (transport == transport->asoc->peer.retran_path) 480 sctp_assoc_update_retran_path(transport->asoc); 481 transport->asoc->rtx_data_chunks += 482 transport->asoc->unack_data; 483 break; 484 case SCTP_RTXR_FAST_RTX: 485 SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS); 486 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 487 q->fast_rtx = 1; 488 break; 489 case SCTP_RTXR_PMTUD: 490 SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS); 491 break; 492 case SCTP_RTXR_T1_RTX: 493 SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS); 494 transport->asoc->init_retries++; 495 break; 496 default: 497 BUG(); 498 } 499 500 sctp_retransmit_mark(q, transport, reason); 501 502 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, 503 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 504 * following the procedures outlined in C1 - C5. 505 */ 506 if (reason == SCTP_RTXR_T3_RTX) 507 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 508 509 /* Flush the queues only on timeout, since fast_rtx is only 510 * triggered during sack processing and the queue 511 * will be flushed at the end. 512 */ 513 if (reason != SCTP_RTXR_FAST_RTX) 514 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 515 516 if (error) 517 q->asoc->base.sk->sk_err = -error; 518 } 519 520 /* 521 * Transmit DATA chunks on the retransmit queue. Upon return from 522 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which 523 * need to be transmitted by the caller. 524 * We assume that pkt->transport has already been set. 525 * 526 * The return value is a normal kernel error return value. 
527 */ 528 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, 529 int rtx_timeout, int *start_timer) 530 { 531 struct list_head *lqueue; 532 struct sctp_transport *transport = pkt->transport; 533 sctp_xmit_t status; 534 struct sctp_chunk *chunk, *chunk1; 535 int fast_rtx; 536 int error = 0; 537 int timer = 0; 538 int done = 0; 539 540 lqueue = &q->retransmit; 541 fast_rtx = q->fast_rtx; 542 543 /* This loop handles time-out retransmissions, fast retransmissions, 544 * and retransmissions due to opening of whindow. 545 * 546 * RFC 2960 6.3.3 Handle T3-rtx Expiration 547 * 548 * E3) Determine how many of the earliest (i.e., lowest TSN) 549 * outstanding DATA chunks for the address for which the 550 * T3-rtx has expired will fit into a single packet, subject 551 * to the MTU constraint for the path corresponding to the 552 * destination transport address to which the retransmission 553 * is being sent (this may be different from the address for 554 * which the timer expires [see Section 6.4]). Call this value 555 * K. Bundle and retransmit those K DATA chunks in a single 556 * packet to the destination endpoint. 557 * 558 * [Just to be painfully clear, if we are retransmitting 559 * because a timeout just happened, we should send only ONE 560 * packet of retransmitted data.] 561 * 562 * For fast retransmissions we also send only ONE packet. However, 563 * if we are just flushing the queue due to open window, we'll 564 * try to send as much as possible. 565 */ 566 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 567 /* If the chunk is abandoned, move it to abandoned list. */ 568 if (sctp_chunk_abandoned(chunk)) { 569 list_del_init(&chunk->transmitted_list); 570 sctp_insert_list(&q->abandoned, 571 &chunk->transmitted_list); 572 continue; 573 } 574 575 /* Make sure that Gap Acked TSNs are not retransmitted. A 576 * simple approach is just to move such TSNs out of the 577 * way and into a 'transmitted' queue and skip to the 578 * next chunk. 579 */ 580 if (chunk->tsn_gap_acked) { 581 list_move_tail(&chunk->transmitted_list, 582 &transport->transmitted); 583 continue; 584 } 585 586 /* If we are doing fast retransmit, ignore non-fast_rtransmit 587 * chunks 588 */ 589 if (fast_rtx && !chunk->fast_retransmit) 590 continue; 591 592 redo: 593 /* Attempt to append this chunk to the packet. */ 594 status = sctp_packet_append_chunk(pkt, chunk); 595 596 switch (status) { 597 case SCTP_XMIT_PMTU_FULL: 598 if (!pkt->has_data && !pkt->has_cookie_echo) { 599 /* If this packet did not contain DATA then 600 * retransmission did not happen, so do it 601 * again. We'll ignore the error here since 602 * control chunks are already freed so there 603 * is nothing we can do. 604 */ 605 sctp_packet_transmit(pkt); 606 goto redo; 607 } 608 609 /* Send this packet. */ 610 error = sctp_packet_transmit(pkt); 611 612 /* If we are retransmitting, we should only 613 * send a single packet. 614 * Otherwise, try appending this chunk again. 615 */ 616 if (rtx_timeout || fast_rtx) 617 done = 1; 618 else 619 goto redo; 620 621 /* Bundle next chunk in the next round. */ 622 break; 623 624 case SCTP_XMIT_RWND_FULL: 625 /* Send this packet. */ 626 error = sctp_packet_transmit(pkt); 627 628 /* Stop sending DATA as there is no more room 629 * at the receiver. 630 */ 631 done = 1; 632 break; 633 634 case SCTP_XMIT_NAGLE_DELAY: 635 /* Send this packet. */ 636 error = sctp_packet_transmit(pkt); 637 638 /* Stop sending DATA because of nagle delay. 
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}

/* Uncork the outqueue so queued chunks can really be sent. */
int sctp_outq_uncork(struct sctp_outq *q)
{
	if (q->cork)
		q->cork = 0;

	return sctp_outq_flush(q, 0);
}


/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts, so
 * appropriate locking is required.  Today we use the sock lock to
 * protect this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.
			 * If not a match, then try to look up the
			 * transport with a given destination address.
			 * We do this because after processing ASCONFs,
			 * we may have new transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* If we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk))
				packet->vtag = asoc->c.my_vtag;
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fall through */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
1003 */ 1004 sctp_transport_burst_limited(transport); 1005 } 1006 1007 pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " 1008 "skb->users:%d\n", 1009 __func__, q, chunk, chunk && chunk->chunk_hdr ? 1010 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 1011 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), 1012 chunk->skb ? chunk->skb->head : NULL, chunk->skb ? 1013 atomic_read(&chunk->skb->users) : -1); 1014 1015 /* Add the chunk to the packet. */ 1016 status = sctp_packet_transmit_chunk(packet, chunk, 0); 1017 1018 switch (status) { 1019 case SCTP_XMIT_PMTU_FULL: 1020 case SCTP_XMIT_RWND_FULL: 1021 case SCTP_XMIT_NAGLE_DELAY: 1022 /* We could not append this chunk, so put 1023 * the chunk back on the output queue. 1024 */ 1025 pr_debug("%s: could not transmit tsn:0x%x, status:%d\n", 1026 __func__, ntohl(chunk->subh.data_hdr->tsn), 1027 status); 1028 1029 sctp_outq_head_data(q, chunk); 1030 goto sctp_flush_out; 1031 break; 1032 1033 case SCTP_XMIT_OK: 1034 /* The sender is in the SHUTDOWN-PENDING state, 1035 * The sender MAY set the I-bit in the DATA 1036 * chunk header. 1037 */ 1038 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) 1039 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; 1040 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 1041 asoc->stats.ouodchunks++; 1042 else 1043 asoc->stats.oodchunks++; 1044 1045 break; 1046 1047 default: 1048 BUG(); 1049 } 1050 1051 /* BUG: We assume that the sctp_packet_transmit() 1052 * call below will succeed all the time and add the 1053 * chunk to the transmitted list and restart the 1054 * timers. 1055 * It is possible that the call can fail under OOM 1056 * conditions. 1057 * 1058 * Is this really a problem? Won't this behave 1059 * like a lost TSN? 1060 */ 1061 list_add_tail(&chunk->transmitted_list, 1062 &transport->transmitted); 1063 1064 sctp_transport_reset_timers(transport); 1065 1066 q->empty = 0; 1067 1068 /* Only let one DATA chunk get bundled with a 1069 * COOKIE-ECHO chunk. 1070 */ 1071 if (packet->has_cookie_echo) 1072 goto sctp_flush_out; 1073 } 1074 break; 1075 1076 default: 1077 /* Do nothing. */ 1078 break; 1079 } 1080 1081 sctp_flush_out: 1082 1083 /* Before returning, examine all the transports touched in 1084 * this call. Right now, we bluntly force clear all the 1085 * transports. Things might change after we implement Nagle. 1086 * But such an examination is still required. 1087 * 1088 * --xguo 1089 */ 1090 while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { 1091 struct sctp_transport *t = list_entry(ltransport, 1092 struct sctp_transport, 1093 send_ready); 1094 packet = &t->packet; 1095 if (!sctp_packet_empty(packet)) 1096 error = sctp_packet_transmit(packet); 1097 1098 /* Clear the burst limited state, if any */ 1099 sctp_transport_burst_reset(t); 1100 } 1101 1102 return error; 1103 } 1104 1105 /* Update unack_data based on the incoming SACK chunk */ 1106 static void sctp_sack_update_unack_data(struct sctp_association *assoc, 1107 struct sctp_sackhdr *sack) 1108 { 1109 sctp_sack_variable_t *frags; 1110 __u16 unack_data; 1111 int i; 1112 1113 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; 1114 1115 frags = sack->variable; 1116 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { 1117 unack_data -= ((ntohs(frags[i].gab.end) - 1118 ntohs(frags[i].gab.start) + 1)); 1119 } 1120 1121 assoc->unack_data = unack_data; 1122 } 1123 1124 /* This is where we REALLY process a SACK. 1125 * 1126 * Process the SACK against the outqueue. 
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					    transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack,
			       &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted,
					  transport, highest_new_tsn,
					  count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	pr_debug("%s: sack queue is empty\n", __func__);
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}
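
/* Caller sketch (an illustrative assumption, not code from this file):
 * the SACK-handling side can use the return value of sctp_outq_sack()
 * to decide whether all outstanding data has been acknowledged,
 * roughly
 *
 *	if (sctp_outq_sack(&asoc->outqueue, chunk))
 *		;	(everything acked; e.g. a pending shutdown
 *			 sequence may proceed)
 *
 * while sctp_outq_is_empty() exposes the same q->empty flag to callers
 * that do not have a SACK in hand.
 */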
1314 */ 1315 static void sctp_check_transmitted(struct sctp_outq *q, 1316 struct list_head *transmitted_queue, 1317 struct sctp_transport *transport, 1318 union sctp_addr *saddr, 1319 struct sctp_sackhdr *sack, 1320 __u32 *highest_new_tsn_in_sack) 1321 { 1322 struct list_head *lchunk; 1323 struct sctp_chunk *tchunk; 1324 struct list_head tlist; 1325 __u32 tsn; 1326 __u32 sack_ctsn; 1327 __u32 rtt; 1328 __u8 restart_timer = 0; 1329 int bytes_acked = 0; 1330 int migrate_bytes = 0; 1331 bool forward_progress = false; 1332 1333 sack_ctsn = ntohl(sack->cum_tsn_ack); 1334 1335 INIT_LIST_HEAD(&tlist); 1336 1337 /* The while loop will skip empty transmitted queues. */ 1338 while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { 1339 tchunk = list_entry(lchunk, struct sctp_chunk, 1340 transmitted_list); 1341 1342 if (sctp_chunk_abandoned(tchunk)) { 1343 /* Move the chunk to abandoned list. */ 1344 sctp_insert_list(&q->abandoned, lchunk); 1345 1346 /* If this chunk has not been acked, stop 1347 * considering it as 'outstanding'. 1348 */ 1349 if (!tchunk->tsn_gap_acked) { 1350 if (tchunk->transport) 1351 tchunk->transport->flight_size -= 1352 sctp_data_size(tchunk); 1353 q->outstanding_bytes -= sctp_data_size(tchunk); 1354 } 1355 continue; 1356 } 1357 1358 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1359 if (sctp_acked(sack, tsn)) { 1360 /* If this queue is the retransmit queue, the 1361 * retransmit timer has already reclaimed 1362 * the outstanding bytes for this chunk, so only 1363 * count bytes associated with a transport. 1364 */ 1365 if (transport) { 1366 /* If this chunk is being used for RTT 1367 * measurement, calculate the RTT and update 1368 * the RTO using this value. 1369 * 1370 * 6.3.1 C5) Karn's algorithm: RTT measurements 1371 * MUST NOT be made using packets that were 1372 * retransmitted (and thus for which it is 1373 * ambiguous whether the reply was for the 1374 * first instance of the packet or a later 1375 * instance). 1376 */ 1377 if (!tchunk->tsn_gap_acked && 1378 tchunk->rtt_in_progress) { 1379 tchunk->rtt_in_progress = 0; 1380 rtt = jiffies - tchunk->sent_at; 1381 sctp_transport_update_rto(transport, 1382 rtt); 1383 } 1384 } 1385 1386 /* If the chunk hasn't been marked as ACKED, 1387 * mark it and account bytes_acked if the 1388 * chunk had a valid transport (it will not 1389 * have a transport if ASCONF had deleted it 1390 * while DATA was outstanding). 1391 */ 1392 if (!tchunk->tsn_gap_acked) { 1393 tchunk->tsn_gap_acked = 1; 1394 *highest_new_tsn_in_sack = tsn; 1395 bytes_acked += sctp_data_size(tchunk); 1396 if (!tchunk->transport) 1397 migrate_bytes += sctp_data_size(tchunk); 1398 forward_progress = true; 1399 } 1400 1401 if (TSN_lte(tsn, sack_ctsn)) { 1402 /* RFC 2960 6.3.2 Retransmission Timer Rules 1403 * 1404 * R3) Whenever a SACK is received 1405 * that acknowledges the DATA chunk 1406 * with the earliest outstanding TSN 1407 * for that address, restart T3-rtx 1408 * timer for that address with its 1409 * current RTO. 1410 */ 1411 restart_timer = 1; 1412 forward_progress = true; 1413 1414 if (!tchunk->tsn_gap_acked) { 1415 /* 1416 * SFR-CACC algorithm: 1417 * 2) If the SACK contains gap acks 1418 * and the flag CHANGEOVER_ACTIVE is 1419 * set the receiver of the SACK MUST 1420 * take the following action: 1421 * 1422 * B) For each TSN t being acked that 1423 * has not been acked in any SACK so 1424 * far, set cacc_saw_newack to 1 for 1425 * the destination that the TSN was 1426 * sent to. 
1427 */ 1428 if (transport && 1429 sack->num_gap_ack_blocks && 1430 q->asoc->peer.primary_path->cacc. 1431 changeover_active) 1432 transport->cacc.cacc_saw_newack 1433 = 1; 1434 } 1435 1436 list_add_tail(&tchunk->transmitted_list, 1437 &q->sacked); 1438 } else { 1439 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 1440 * M2) Each time a SACK arrives reporting 1441 * 'Stray DATA chunk(s)' record the highest TSN 1442 * reported as newly acknowledged, call this 1443 * value 'HighestTSNinSack'. A newly 1444 * acknowledged DATA chunk is one not 1445 * previously acknowledged in a SACK. 1446 * 1447 * When the SCTP sender of data receives a SACK 1448 * chunk that acknowledges, for the first time, 1449 * the receipt of a DATA chunk, all the still 1450 * unacknowledged DATA chunks whose TSN is 1451 * older than that newly acknowledged DATA 1452 * chunk, are qualified as 'Stray DATA chunks'. 1453 */ 1454 list_add_tail(lchunk, &tlist); 1455 } 1456 } else { 1457 if (tchunk->tsn_gap_acked) { 1458 pr_debug("%s: receiver reneged on data TSN:0x%x\n", 1459 __func__, tsn); 1460 1461 tchunk->tsn_gap_acked = 0; 1462 1463 if (tchunk->transport) 1464 bytes_acked -= sctp_data_size(tchunk); 1465 1466 /* RFC 2960 6.3.2 Retransmission Timer Rules 1467 * 1468 * R4) Whenever a SACK is received missing a 1469 * TSN that was previously acknowledged via a 1470 * Gap Ack Block, start T3-rtx for the 1471 * destination address to which the DATA 1472 * chunk was originally 1473 * transmitted if it is not already running. 1474 */ 1475 restart_timer = 1; 1476 } 1477 1478 list_add_tail(lchunk, &tlist); 1479 } 1480 } 1481 1482 if (transport) { 1483 if (bytes_acked) { 1484 struct sctp_association *asoc = transport->asoc; 1485 1486 /* We may have counted DATA that was migrated 1487 * to this transport due to DEL-IP operation. 1488 * Subtract those bytes, since the were never 1489 * send on this transport and shouldn't be 1490 * credited to this transport. 1491 */ 1492 bytes_acked -= migrate_bytes; 1493 1494 /* 8.2. When an outstanding TSN is acknowledged, 1495 * the endpoint shall clear the error counter of 1496 * the destination transport address to which the 1497 * DATA chunk was last sent. 1498 * The association's overall error counter is 1499 * also cleared. 1500 */ 1501 transport->error_count = 0; 1502 transport->asoc->overall_error_count = 0; 1503 forward_progress = true; 1504 1505 /* 1506 * While in SHUTDOWN PENDING, we may have started 1507 * the T5 shutdown guard timer after reaching the 1508 * retransmission limit. Stop that timer as soon 1509 * as the receiver acknowledged any data. 1510 */ 1511 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && 1512 del_timer(&asoc->timers 1513 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) 1514 sctp_association_put(asoc); 1515 1516 /* Mark the destination transport address as 1517 * active if it is not so marked. 
1518 */ 1519 if ((transport->state == SCTP_INACTIVE || 1520 transport->state == SCTP_UNCONFIRMED) && 1521 sctp_cmp_addr_exact(&transport->ipaddr, saddr)) { 1522 sctp_assoc_control_transport( 1523 transport->asoc, 1524 transport, 1525 SCTP_TRANSPORT_UP, 1526 SCTP_RECEIVED_SACK); 1527 } 1528 1529 sctp_transport_raise_cwnd(transport, sack_ctsn, 1530 bytes_acked); 1531 1532 transport->flight_size -= bytes_acked; 1533 if (transport->flight_size == 0) 1534 transport->partial_bytes_acked = 0; 1535 q->outstanding_bytes -= bytes_acked + migrate_bytes; 1536 } else { 1537 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1538 * When a sender is doing zero window probing, it 1539 * should not timeout the association if it continues 1540 * to receive new packets from the receiver. The 1541 * reason is that the receiver MAY keep its window 1542 * closed for an indefinite time. 1543 * A sender is doing zero window probing when the 1544 * receiver's advertised window is zero, and there is 1545 * only one data chunk in flight to the receiver. 1546 * 1547 * Allow the association to timeout while in SHUTDOWN 1548 * PENDING or SHUTDOWN RECEIVED in case the receiver 1549 * stays in zero window mode forever. 1550 */ 1551 if (!q->asoc->peer.rwnd && 1552 !list_empty(&tlist) && 1553 (sack_ctsn+2 == q->asoc->next_tsn) && 1554 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { 1555 pr_debug("%s: sack received for zero window " 1556 "probe:%u\n", __func__, sack_ctsn); 1557 1558 q->asoc->overall_error_count = 0; 1559 transport->error_count = 0; 1560 } 1561 } 1562 1563 /* RFC 2960 6.3.2 Retransmission Timer Rules 1564 * 1565 * R2) Whenever all outstanding data sent to an address have 1566 * been acknowledged, turn off the T3-rtx timer of that 1567 * address. 1568 */ 1569 if (!transport->flight_size) { 1570 if (del_timer(&transport->T3_rtx_timer)) 1571 sctp_transport_put(transport); 1572 } else if (restart_timer) { 1573 if (!mod_timer(&transport->T3_rtx_timer, 1574 jiffies + transport->rto)) 1575 sctp_transport_hold(transport); 1576 } 1577 1578 if (forward_progress) { 1579 if (transport->dst) 1580 dst_confirm(transport->dst); 1581 } 1582 } 1583 1584 list_splice(&tlist, transmitted_queue); 1585 } 1586 1587 /* Mark chunks as missing and consequently may get retransmitted. */ 1588 static void sctp_mark_missing(struct sctp_outq *q, 1589 struct list_head *transmitted_queue, 1590 struct sctp_transport *transport, 1591 __u32 highest_new_tsn_in_sack, 1592 int count_of_newacks) 1593 { 1594 struct sctp_chunk *chunk; 1595 __u32 tsn; 1596 char do_fast_retransmit = 0; 1597 struct sctp_association *asoc = q->asoc; 1598 struct sctp_transport *primary = asoc->peer.primary_path; 1599 1600 list_for_each_entry(chunk, transmitted_queue, transmitted_list) { 1601 1602 tsn = ntohl(chunk->subh.data_hdr->tsn); 1603 1604 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all 1605 * 'Unacknowledged TSN's', if the TSN number of an 1606 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' 1607 * value, increment the 'TSN.Missing.Report' count on that 1608 * chunk if it has NOT been fast retransmitted or marked for 1609 * fast retransmit already. 1610 */ 1611 if (chunk->fast_retransmit == SCTP_CAN_FRTX && 1612 !chunk->tsn_gap_acked && 1613 TSN_lt(tsn, highest_new_tsn_in_sack)) { 1614 1615 /* SFR-CACC may require us to skip marking 1616 * this chunk as missing. 
1617 */ 1618 if (!transport || !sctp_cacc_skip(primary, 1619 chunk->transport, 1620 count_of_newacks, tsn)) { 1621 chunk->tsn_missing_report++; 1622 1623 pr_debug("%s: tsn:0x%x missing counter:%d\n", 1624 __func__, tsn, chunk->tsn_missing_report); 1625 } 1626 } 1627 /* 1628 * M4) If any DATA chunk is found to have a 1629 * 'TSN.Missing.Report' 1630 * value larger than or equal to 3, mark that chunk for 1631 * retransmission and start the fast retransmit procedure. 1632 */ 1633 1634 if (chunk->tsn_missing_report >= 3) { 1635 chunk->fast_retransmit = SCTP_NEED_FRTX; 1636 do_fast_retransmit = 1; 1637 } 1638 } 1639 1640 if (transport) { 1641 if (do_fast_retransmit) 1642 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); 1643 1644 pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, " 1645 "flight_size:%d, pba:%d\n", __func__, transport, 1646 transport->cwnd, transport->ssthresh, 1647 transport->flight_size, transport->partial_bytes_acked); 1648 } 1649 } 1650 1651 /* Is the given TSN acked by this packet? */ 1652 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) 1653 { 1654 int i; 1655 sctp_sack_variable_t *frags; 1656 __u16 gap; 1657 __u32 ctsn = ntohl(sack->cum_tsn_ack); 1658 1659 if (TSN_lte(tsn, ctsn)) 1660 goto pass; 1661 1662 /* 3.3.4 Selective Acknowledgement (SACK) (3): 1663 * 1664 * Gap Ack Blocks: 1665 * These fields contain the Gap Ack Blocks. They are repeated 1666 * for each Gap Ack Block up to the number of Gap Ack Blocks 1667 * defined in the Number of Gap Ack Blocks field. All DATA 1668 * chunks with TSNs greater than or equal to (Cumulative TSN 1669 * Ack + Gap Ack Block Start) and less than or equal to 1670 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack 1671 * Block are assumed to have been received correctly. 1672 */ 1673 1674 frags = sack->variable; 1675 gap = tsn - ctsn; 1676 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { 1677 if (TSN_lte(ntohs(frags[i].gab.start), gap) && 1678 TSN_lte(gap, ntohs(frags[i].gab.end))) 1679 goto pass; 1680 } 1681 1682 return 0; 1683 pass: 1684 return 1; 1685 } 1686 1687 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, 1688 int nskips, __be16 stream) 1689 { 1690 int i; 1691 1692 for (i = 0; i < nskips; i++) { 1693 if (skiplist[i].stream == stream) 1694 return i; 1695 } 1696 return i; 1697 } 1698 1699 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ 1700 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) 1701 { 1702 struct sctp_association *asoc = q->asoc; 1703 struct sctp_chunk *ftsn_chunk = NULL; 1704 struct sctp_fwdtsn_skip ftsn_skip_arr[10]; 1705 int nskips = 0; 1706 int skip_pos = 0; 1707 __u32 tsn; 1708 struct sctp_chunk *chunk; 1709 struct list_head *lchunk, *temp; 1710 1711 if (!asoc->peer.prsctp_capable) 1712 return; 1713 1714 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the 1715 * received SACK. 1716 * 1717 * If (Advanced.Peer.Ack.Point < SackCumAck), then update 1718 * Advanced.Peer.Ack.Point to be equal to SackCumAck. 
1719 */ 1720 if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) 1721 asoc->adv_peer_ack_point = ctsn; 1722 1723 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" 1724 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as 1725 * the chunk next in the out-queue space is marked as "abandoned" as 1726 * shown in the following example: 1727 * 1728 * Assuming that a SACK arrived with the Cumulative TSN ACK 102 1729 * and the Advanced.Peer.Ack.Point is updated to this value: 1730 * 1731 * out-queue at the end of ==> out-queue after Adv.Ack.Point 1732 * normal SACK processing local advancement 1733 * ... ... 1734 * Adv.Ack.Pt-> 102 acked 102 acked 1735 * 103 abandoned 103 abandoned 1736 * 104 abandoned Adv.Ack.P-> 104 abandoned 1737 * 105 105 1738 * 106 acked 106 acked 1739 * ... ... 1740 * 1741 * In this example, the data sender successfully advanced the 1742 * "Advanced.Peer.Ack.Point" from 102 to 104 locally. 1743 */ 1744 list_for_each_safe(lchunk, temp, &q->abandoned) { 1745 chunk = list_entry(lchunk, struct sctp_chunk, 1746 transmitted_list); 1747 tsn = ntohl(chunk->subh.data_hdr->tsn); 1748 1749 /* Remove any chunks in the abandoned queue that are acked by 1750 * the ctsn. 1751 */ 1752 if (TSN_lte(tsn, ctsn)) { 1753 list_del_init(lchunk); 1754 sctp_chunk_free(chunk); 1755 } else { 1756 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { 1757 asoc->adv_peer_ack_point = tsn; 1758 if (chunk->chunk_hdr->flags & 1759 SCTP_DATA_UNORDERED) 1760 continue; 1761 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], 1762 nskips, 1763 chunk->subh.data_hdr->stream); 1764 ftsn_skip_arr[skip_pos].stream = 1765 chunk->subh.data_hdr->stream; 1766 ftsn_skip_arr[skip_pos].ssn = 1767 chunk->subh.data_hdr->ssn; 1768 if (skip_pos == nskips) 1769 nskips++; 1770 if (nskips == 10) 1771 break; 1772 } else 1773 break; 1774 } 1775 } 1776 1777 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" 1778 * is greater than the Cumulative TSN ACK carried in the received 1779 * SACK, the data sender MUST send the data receiver a FORWARD TSN 1780 * chunk containing the latest value of the 1781 * "Advanced.Peer.Ack.Point". 1782 * 1783 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD 1784 * list each stream and sequence number in the forwarded TSN. This 1785 * information will enable the receiver to easily find any 1786 * stranded TSN's waiting on stream reorder queues. Each stream 1787 * SHOULD only be reported once; this means that if multiple 1788 * abandoned messages occur in the same stream then only the 1789 * highest abandoned stream sequence number is reported. If the 1790 * total size of the FORWARD TSN does NOT fit in a single MTU then 1791 * the sender of the FORWARD TSN SHOULD lower the 1792 * Advanced.Peer.Ack.Point to the last TSN that will fit in a 1793 * single MTU. 1794 */ 1795 if (asoc->adv_peer_ack_point > ctsn) 1796 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, 1797 nskips, &ftsn_skip_arr[0]); 1798 1799 if (ftsn_chunk) { 1800 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1801 SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS); 1802 } 1803 } 1804