1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * 7 * This file is part of the SCTP kernel implementation 8 * 9 * These functions implement the sctp_outq class. The outqueue handles 10 * bundling and queueing of outgoing SCTP chunks. 11 * 12 * This SCTP implementation is free software; 13 * you can redistribute it and/or modify it under the terms of 14 * the GNU General Public License as published by 15 * the Free Software Foundation; either version 2, or (at your option) 16 * any later version. 17 * 18 * This SCTP implementation is distributed in the hope that it 19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 20 * ************************ 21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 22 * See the GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with GNU CC; see the file COPYING. If not, write to 26 * the Free Software Foundation, 59 Temple Place - Suite 330, 27 * Boston, MA 02111-1307, USA. 28 * 29 * Please send any bug reports or fixes you make to the 30 * email address(es): 31 * lksctp developers <lksctp-developers@lists.sourceforge.net> 32 * 33 * Or submit a bug report through the following website: 34 * http://www.sf.net/projects/lksctp 35 * 36 * Written or modified by: 37 * La Monte H.P. Yarroll <piggy@acm.org> 38 * Karl Knutson <karl@athena.chicago.il.us> 39 * Perry Melange <pmelange@null.cc.uic.edu> 40 * Xingang Guo <xingang.guo@intel.com> 41 * Hui Huang <hui.huang@nokia.com> 42 * Sridhar Samudrala <sri@us.ibm.com> 43 * Jon Grimm <jgrimm@us.ibm.com> 44 * 45 * Any bugs reported given to us we will try to fix... any fixes shared will 46 * be incorporated into the next SCTP release. 47 */ 48 49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 50 51 #include <linux/types.h> 52 #include <linux/list.h> /* For struct list_head */ 53 #include <linux/socket.h> 54 #include <linux/ip.h> 55 #include <linux/slab.h> 56 #include <net/sock.h> /* For skb_set_owner_w */ 57 58 #include <net/sctp/sctp.h> 59 #include <net/sctp/sm.h> 60 61 /* Declare internal functions here. */ 62 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); 63 static void sctp_check_transmitted(struct sctp_outq *q, 64 struct list_head *transmitted_queue, 65 struct sctp_transport *transport, 66 struct sctp_sackhdr *sack, 67 __u32 *highest_new_tsn); 68 69 static void sctp_mark_missing(struct sctp_outq *q, 70 struct list_head *transmitted_queue, 71 struct sctp_transport *transport, 72 __u32 highest_new_tsn, 73 int count_of_newacks); 74 75 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); 76 77 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout); 78 79 /* Add data to the front of the queue. */ 80 static inline void sctp_outq_head_data(struct sctp_outq *q, 81 struct sctp_chunk *ch) 82 { 83 list_add(&ch->list, &q->out_chunk_list); 84 q->out_qlen += ch->skb->len; 85 } 86 87 /* Take data from the front of the queue. */ 88 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) 89 { 90 struct sctp_chunk *ch = NULL; 91 92 if (!list_empty(&q->out_chunk_list)) { 93 struct list_head *entry = q->out_chunk_list.next; 94 95 ch = list_entry(entry, struct sctp_chunk, list); 96 list_del_init(entry); 97 q->out_qlen -= ch->skb->len; 98 } 99 return ch; 100 } 101 /* Add data chunk to the end of the queue. */ 102 static inline void sctp_outq_tail_data(struct sctp_outq *q, 103 struct sctp_chunk *ch) 104 { 105 list_add_tail(&ch->list, &q->out_chunk_list); 106 q->out_qlen += ch->skb->len; 107 } 108 109 /* 110 * SFR-CACC algorithm: 111 * D) If count_of_newacks is greater than or equal to 2 112 * and t was not sent to the current primary then the 113 * sender MUST NOT increment missing report count for t. 114 */ 115 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, 116 struct sctp_transport *transport, 117 int count_of_newacks) 118 { 119 if (count_of_newacks >=2 && transport != primary) 120 return 1; 121 return 0; 122 } 123 124 /* 125 * SFR-CACC algorithm: 126 * F) If count_of_newacks is less than 2, let d be the 127 * destination to which t was sent. If cacc_saw_newack 128 * is 0 for destination d, then the sender MUST NOT 129 * increment missing report count for t. 130 */ 131 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 132 int count_of_newacks) 133 { 134 if (count_of_newacks < 2 && 135 (transport && !transport->cacc.cacc_saw_newack)) 136 return 1; 137 return 0; 138 } 139 140 /* 141 * SFR-CACC algorithm: 142 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD 143 * execute steps C, D, F. 144 * 145 * C has been implemented in sctp_outq_sack 146 */ 147 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, 148 struct sctp_transport *transport, 149 int count_of_newacks) 150 { 151 if (!primary->cacc.cycling_changeover) { 152 if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) 153 return 1; 154 if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) 155 return 1; 156 return 0; 157 } 158 return 0; 159 } 160 161 /* 162 * SFR-CACC algorithm: 163 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less 164 * than next_tsn_at_change of the current primary, then 165 * the sender MUST NOT increment missing report count 166 * for t. 167 */ 168 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) 169 { 170 if (primary->cacc.cycling_changeover && 171 TSN_lt(tsn, primary->cacc.next_tsn_at_change)) 172 return 1; 173 return 0; 174 } 175 176 /* 177 * SFR-CACC algorithm: 178 * 3) If the missing report count for TSN t is to be 179 * incremented according to [RFC2960] and 180 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, 181 * then the sender MUST further execute steps 3.1 and 182 * 3.2 to determine if the missing report count for 183 * TSN t SHOULD NOT be incremented. 184 * 185 * 3.3) If 3.1 and 3.2 do not dictate that the missing 186 * report count for t should not be incremented, then 187 * the sender SHOULD increment missing report count for 188 * t (according to [RFC2960] and [SCTP_STEWART_2002]). 189 */ 190 static inline int sctp_cacc_skip(struct sctp_transport *primary, 191 struct sctp_transport *transport, 192 int count_of_newacks, 193 __u32 tsn) 194 { 195 if (primary->cacc.changeover_active && 196 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) || 197 sctp_cacc_skip_3_2(primary, tsn))) 198 return 1; 199 return 0; 200 } 201 202 /* Initialize an existing sctp_outq. This does the boring stuff. 203 * You still need to define handlers if you really want to DO 204 * something with this structure... 205 */ 206 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 207 { 208 q->asoc = asoc; 209 INIT_LIST_HEAD(&q->out_chunk_list); 210 INIT_LIST_HEAD(&q->control_chunk_list); 211 INIT_LIST_HEAD(&q->retransmit); 212 INIT_LIST_HEAD(&q->sacked); 213 INIT_LIST_HEAD(&q->abandoned); 214 215 q->fast_rtx = 0; 216 q->outstanding_bytes = 0; 217 q->empty = 1; 218 q->cork = 0; 219 220 q->malloced = 0; 221 q->out_qlen = 0; 222 } 223 224 /* Free the outqueue structure and any related pending chunks. 225 */ 226 void sctp_outq_teardown(struct sctp_outq *q) 227 { 228 struct sctp_transport *transport; 229 struct list_head *lchunk, *temp; 230 struct sctp_chunk *chunk, *tmp; 231 232 /* Throw away unacknowledged chunks. */ 233 list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, 234 transports) { 235 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { 236 chunk = list_entry(lchunk, struct sctp_chunk, 237 transmitted_list); 238 /* Mark as part of a failed message. */ 239 sctp_chunk_fail(chunk, q->error); 240 sctp_chunk_free(chunk); 241 } 242 } 243 244 /* Throw away chunks that have been gap ACKed. */ 245 list_for_each_safe(lchunk, temp, &q->sacked) { 246 list_del_init(lchunk); 247 chunk = list_entry(lchunk, struct sctp_chunk, 248 transmitted_list); 249 sctp_chunk_fail(chunk, q->error); 250 sctp_chunk_free(chunk); 251 } 252 253 /* Throw away any chunks in the retransmit queue. */ 254 list_for_each_safe(lchunk, temp, &q->retransmit) { 255 list_del_init(lchunk); 256 chunk = list_entry(lchunk, struct sctp_chunk, 257 transmitted_list); 258 sctp_chunk_fail(chunk, q->error); 259 sctp_chunk_free(chunk); 260 } 261 262 /* Throw away any chunks that are in the abandoned queue. */ 263 list_for_each_safe(lchunk, temp, &q->abandoned) { 264 list_del_init(lchunk); 265 chunk = list_entry(lchunk, struct sctp_chunk, 266 transmitted_list); 267 sctp_chunk_fail(chunk, q->error); 268 sctp_chunk_free(chunk); 269 } 270 271 /* Throw away any leftover data chunks. */ 272 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 273 274 /* Mark as send failure. */ 275 sctp_chunk_fail(chunk, q->error); 276 sctp_chunk_free(chunk); 277 } 278 279 q->error = 0; 280 281 /* Throw away any leftover control chunks. */ 282 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 283 list_del_init(&chunk->list); 284 sctp_chunk_free(chunk); 285 } 286 } 287 288 /* Free the outqueue structure and any related pending chunks. */ 289 void sctp_outq_free(struct sctp_outq *q) 290 { 291 /* Throw away leftover chunks. */ 292 sctp_outq_teardown(q); 293 294 /* If we were kmalloc()'d, free the memory. */ 295 if (q->malloced) 296 kfree(q); 297 } 298 299 /* Put a new chunk in an sctp_outq. */ 300 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) 301 { 302 int error = 0; 303 304 SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", 305 q, chunk, chunk && chunk->chunk_hdr ? 306 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) 307 : "Illegal Chunk"); 308 309 /* If it is data, queue it up, otherwise, send it 310 * immediately. 311 */ 312 if (sctp_chunk_is_data(chunk)) { 313 /* Is it OK to queue data chunks? */ 314 /* From 9. Termination of Association 315 * 316 * When either endpoint performs a shutdown, the 317 * association on each peer will stop accepting new 318 * data from its user and only deliver data in queue 319 * at the time of sending or receiving the SHUTDOWN 320 * chunk. 321 */ 322 switch (q->asoc->state) { 323 case SCTP_STATE_CLOSED: 324 case SCTP_STATE_SHUTDOWN_PENDING: 325 case SCTP_STATE_SHUTDOWN_SENT: 326 case SCTP_STATE_SHUTDOWN_RECEIVED: 327 case SCTP_STATE_SHUTDOWN_ACK_SENT: 328 /* Cannot send after transport endpoint shutdown */ 329 error = -ESHUTDOWN; 330 break; 331 332 default: 333 SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", 334 q, chunk, chunk && chunk->chunk_hdr ? 335 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) 336 : "Illegal Chunk"); 337 338 sctp_outq_tail_data(q, chunk); 339 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 340 SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS); 341 else 342 SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); 343 q->empty = 0; 344 break; 345 } 346 } else { 347 list_add_tail(&chunk->list, &q->control_chunk_list); 348 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 349 } 350 351 if (error < 0) 352 return error; 353 354 if (!q->cork) 355 error = sctp_outq_flush(q, 0); 356 357 return error; 358 } 359 360 /* Insert a chunk into the sorted list based on the TSNs. The retransmit list 361 * and the abandoned list are in ascending order. 362 */ 363 static void sctp_insert_list(struct list_head *head, struct list_head *new) 364 { 365 struct list_head *pos; 366 struct sctp_chunk *nchunk, *lchunk; 367 __u32 ntsn, ltsn; 368 int done = 0; 369 370 nchunk = list_entry(new, struct sctp_chunk, transmitted_list); 371 ntsn = ntohl(nchunk->subh.data_hdr->tsn); 372 373 list_for_each(pos, head) { 374 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); 375 ltsn = ntohl(lchunk->subh.data_hdr->tsn); 376 if (TSN_lt(ntsn, ltsn)) { 377 list_add(new, pos->prev); 378 done = 1; 379 break; 380 } 381 } 382 if (!done) 383 list_add_tail(new, head); 384 } 385 386 /* Mark all the eligible packets on a transport for retransmission. */ 387 void sctp_retransmit_mark(struct sctp_outq *q, 388 struct sctp_transport *transport, 389 __u8 reason) 390 { 391 struct list_head *lchunk, *ltemp; 392 struct sctp_chunk *chunk; 393 394 /* Walk through the specified transmitted queue. */ 395 list_for_each_safe(lchunk, ltemp, &transport->transmitted) { 396 chunk = list_entry(lchunk, struct sctp_chunk, 397 transmitted_list); 398 399 /* If the chunk is abandoned, move it to abandoned list. */ 400 if (sctp_chunk_abandoned(chunk)) { 401 list_del_init(lchunk); 402 sctp_insert_list(&q->abandoned, lchunk); 403 404 /* If this chunk has not been previousely acked, 405 * stop considering it 'outstanding'. Our peer 406 * will most likely never see it since it will 407 * not be retransmitted 408 */ 409 if (!chunk->tsn_gap_acked) { 410 if (chunk->transport) 411 chunk->transport->flight_size -= 412 sctp_data_size(chunk); 413 q->outstanding_bytes -= sctp_data_size(chunk); 414 q->asoc->peer.rwnd += sctp_data_size(chunk); 415 } 416 continue; 417 } 418 419 /* If we are doing retransmission due to a timeout or pmtu 420 * discovery, only the chunks that are not yet acked should 421 * be added to the retransmit queue. 422 */ 423 if ((reason == SCTP_RTXR_FAST_RTX && 424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 426 /* RFC 2960 6.2.1 Processing a Received SACK 427 * 428 * C) Any time a DATA chunk is marked for 429 * retransmission (via either T3-rtx timer expiration 430 * (Section 6.3.3) or via fast retransmit 431 * (Section 7.2.4)), add the data size of those 432 * chunks to the rwnd. 433 */ 434 q->asoc->peer.rwnd += sctp_data_size(chunk); 435 q->outstanding_bytes -= sctp_data_size(chunk); 436 if (chunk->transport) 437 transport->flight_size -= sctp_data_size(chunk); 438 439 /* sctpimpguide-05 Section 2.8.2 440 * M5) If a T3-rtx timer expires, the 441 * 'TSN.Missing.Report' of all affected TSNs is set 442 * to 0. 443 */ 444 chunk->tsn_missing_report = 0; 445 446 /* If a chunk that is being used for RTT measurement 447 * has to be retransmitted, we cannot use this chunk 448 * anymore for RTT measurements. Reset rto_pending so 449 * that a new RTT measurement is started when a new 450 * data chunk is sent. 451 */ 452 if (chunk->rtt_in_progress) { 453 chunk->rtt_in_progress = 0; 454 transport->rto_pending = 0; 455 } 456 457 /* Move the chunk to the retransmit queue. The chunks 458 * on the retransmit queue are always kept in order. 459 */ 460 list_del_init(lchunk); 461 sctp_insert_list(&q->retransmit, lchunk); 462 } 463 } 464 465 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " 466 "cwnd: %d, ssthresh: %d, flight_size: %d, " 467 "pba: %d\n", __func__, 468 transport, reason, 469 transport->cwnd, transport->ssthresh, 470 transport->flight_size, 471 transport->partial_bytes_acked); 472 473 } 474 475 /* Mark all the eligible packets on a transport for retransmission and force 476 * one packet out. 477 */ 478 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 479 sctp_retransmit_reason_t reason) 480 { 481 int error = 0; 482 483 switch(reason) { 484 case SCTP_RTXR_T3_RTX: 485 SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS); 486 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); 487 /* Update the retran path if the T3-rtx timer has expired for 488 * the current retran path. 489 */ 490 if (transport == transport->asoc->peer.retran_path) 491 sctp_assoc_update_retran_path(transport->asoc); 492 transport->asoc->rtx_data_chunks += 493 transport->asoc->unack_data; 494 break; 495 case SCTP_RTXR_FAST_RTX: 496 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 497 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 498 q->fast_rtx = 1; 499 break; 500 case SCTP_RTXR_PMTUD: 501 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 502 break; 503 case SCTP_RTXR_T1_RTX: 504 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); 505 transport->asoc->init_retries++; 506 break; 507 default: 508 BUG(); 509 } 510 511 sctp_retransmit_mark(q, transport, reason); 512 513 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, 514 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 515 * following the procedures outlined in C1 - C5. 516 */ 517 if (reason == SCTP_RTXR_T3_RTX) 518 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 519 520 /* Flush the queues only on timeout, since fast_rtx is only 521 * triggered during sack processing and the queue 522 * will be flushed at the end. 523 */ 524 if (reason != SCTP_RTXR_FAST_RTX) 525 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 526 527 if (error) 528 q->asoc->base.sk->sk_err = -error; 529 } 530 531 /* 532 * Transmit DATA chunks on the retransmit queue. Upon return from 533 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which 534 * need to be transmitted by the caller. 535 * We assume that pkt->transport has already been set. 536 * 537 * The return value is a normal kernel error return value. 538 */ 539 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, 540 int rtx_timeout, int *start_timer) 541 { 542 struct list_head *lqueue; 543 struct sctp_transport *transport = pkt->transport; 544 sctp_xmit_t status; 545 struct sctp_chunk *chunk, *chunk1; 546 int fast_rtx; 547 int error = 0; 548 int timer = 0; 549 int done = 0; 550 551 lqueue = &q->retransmit; 552 fast_rtx = q->fast_rtx; 553 554 /* This loop handles time-out retransmissions, fast retransmissions, 555 * and retransmissions due to opening of whindow. 556 * 557 * RFC 2960 6.3.3 Handle T3-rtx Expiration 558 * 559 * E3) Determine how many of the earliest (i.e., lowest TSN) 560 * outstanding DATA chunks for the address for which the 561 * T3-rtx has expired will fit into a single packet, subject 562 * to the MTU constraint for the path corresponding to the 563 * destination transport address to which the retransmission 564 * is being sent (this may be different from the address for 565 * which the timer expires [see Section 6.4]). Call this value 566 * K. Bundle and retransmit those K DATA chunks in a single 567 * packet to the destination endpoint. 568 * 569 * [Just to be painfully clear, if we are retransmitting 570 * because a timeout just happened, we should send only ONE 571 * packet of retransmitted data.] 572 * 573 * For fast retransmissions we also send only ONE packet. However, 574 * if we are just flushing the queue due to open window, we'll 575 * try to send as much as possible. 576 */ 577 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 578 /* If the chunk is abandoned, move it to abandoned list. */ 579 if (sctp_chunk_abandoned(chunk)) { 580 list_del_init(&chunk->transmitted_list); 581 sctp_insert_list(&q->abandoned, 582 &chunk->transmitted_list); 583 continue; 584 } 585 586 /* Make sure that Gap Acked TSNs are not retransmitted. A 587 * simple approach is just to move such TSNs out of the 588 * way and into a 'transmitted' queue and skip to the 589 * next chunk. 590 */ 591 if (chunk->tsn_gap_acked) { 592 list_del(&chunk->transmitted_list); 593 list_add_tail(&chunk->transmitted_list, 594 &transport->transmitted); 595 continue; 596 } 597 598 /* If we are doing fast retransmit, ignore non-fast_rtransmit 599 * chunks 600 */ 601 if (fast_rtx && !chunk->fast_retransmit) 602 continue; 603 604 redo: 605 /* Attempt to append this chunk to the packet. */ 606 status = sctp_packet_append_chunk(pkt, chunk); 607 608 switch (status) { 609 case SCTP_XMIT_PMTU_FULL: 610 if (!pkt->has_data && !pkt->has_cookie_echo) { 611 /* If this packet did not contain DATA then 612 * retransmission did not happen, so do it 613 * again. We'll ignore the error here since 614 * control chunks are already freed so there 615 * is nothing we can do. 616 */ 617 sctp_packet_transmit(pkt); 618 goto redo; 619 } 620 621 /* Send this packet. */ 622 error = sctp_packet_transmit(pkt); 623 624 /* If we are retransmitting, we should only 625 * send a single packet. 626 * Otherwise, try appending this chunk again. 627 */ 628 if (rtx_timeout || fast_rtx) 629 done = 1; 630 else 631 goto redo; 632 633 /* Bundle next chunk in the next round. */ 634 break; 635 636 case SCTP_XMIT_RWND_FULL: 637 /* Send this packet. */ 638 error = sctp_packet_transmit(pkt); 639 640 /* Stop sending DATA as there is no more room 641 * at the receiver. 642 */ 643 done = 1; 644 break; 645 646 case SCTP_XMIT_NAGLE_DELAY: 647 /* Send this packet. */ 648 error = sctp_packet_transmit(pkt); 649 650 /* Stop sending DATA because of nagle delay. */ 651 done = 1; 652 break; 653 654 default: 655 /* The append was successful, so add this chunk to 656 * the transmitted list. 657 */ 658 list_del(&chunk->transmitted_list); 659 list_add_tail(&chunk->transmitted_list, 660 &transport->transmitted); 661 662 /* Mark the chunk as ineligible for fast retransmit 663 * after it is retransmitted. 664 */ 665 if (chunk->fast_retransmit == SCTP_NEED_FRTX) 666 chunk->fast_retransmit = SCTP_DONT_FRTX; 667 668 q->empty = 0; 669 break; 670 } 671 672 /* Set the timer if there were no errors */ 673 if (!error && !timer) 674 timer = 1; 675 676 if (done) 677 break; 678 } 679 680 /* If we are here due to a retransmit timeout or a fast 681 * retransmit and if there are any chunks left in the retransmit 682 * queue that could not fit in the PMTU sized packet, they need 683 * to be marked as ineligible for a subsequent fast retransmit. 684 */ 685 if (rtx_timeout || fast_rtx) { 686 list_for_each_entry(chunk1, lqueue, transmitted_list) { 687 if (chunk1->fast_retransmit == SCTP_NEED_FRTX) 688 chunk1->fast_retransmit = SCTP_DONT_FRTX; 689 } 690 } 691 692 *start_timer = timer; 693 694 /* Clear fast retransmit hint */ 695 if (fast_rtx) 696 q->fast_rtx = 0; 697 698 return error; 699 } 700 701 /* Cork the outqueue so queued chunks are really queued. */ 702 int sctp_outq_uncork(struct sctp_outq *q) 703 { 704 int error = 0; 705 if (q->cork) 706 q->cork = 0; 707 error = sctp_outq_flush(q, 0); 708 return error; 709 } 710 711 712 /* 713 * Try to flush an outqueue. 714 * 715 * Description: Send everything in q which we legally can, subject to 716 * congestion limitations. 717 * * Note: This function can be called from multiple contexts so appropriate 718 * locking concerns must be made. Today we use the sock lock to protect 719 * this function. 720 */ 721 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) 722 { 723 struct sctp_packet *packet; 724 struct sctp_packet singleton; 725 struct sctp_association *asoc = q->asoc; 726 __u16 sport = asoc->base.bind_addr.port; 727 __u16 dport = asoc->peer.port; 728 __u32 vtag = asoc->peer.i.init_tag; 729 struct sctp_transport *transport = NULL; 730 struct sctp_transport *new_transport; 731 struct sctp_chunk *chunk, *tmp; 732 sctp_xmit_t status; 733 int error = 0; 734 int start_timer = 0; 735 int one_packet = 0; 736 737 /* These transports have chunks to send. */ 738 struct list_head transport_list; 739 struct list_head *ltransport; 740 741 INIT_LIST_HEAD(&transport_list); 742 packet = NULL; 743 744 /* 745 * 6.10 Bundling 746 * ... 747 * When bundling control chunks with DATA chunks, an 748 * endpoint MUST place control chunks first in the outbound 749 * SCTP packet. The transmitter MUST transmit DATA chunks 750 * within a SCTP packet in increasing order of TSN. 751 * ... 752 */ 753 754 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 755 /* RFC 5061, 5.3 756 * F1) This means that until such time as the ASCONF 757 * containing the add is acknowledged, the sender MUST 758 * NOT use the new IP address as a source for ANY SCTP 759 * packet except on carrying an ASCONF Chunk. 760 */ 761 if (asoc->src_out_of_asoc_ok && 762 chunk->chunk_hdr->type != SCTP_CID_ASCONF) 763 continue; 764 765 list_del_init(&chunk->list); 766 767 /* Pick the right transport to use. */ 768 new_transport = chunk->transport; 769 770 if (!new_transport) { 771 /* 772 * If we have a prior transport pointer, see if 773 * the destination address of the chunk 774 * matches the destination address of the 775 * current transport. If not a match, then 776 * try to look up the transport with a given 777 * destination address. We do this because 778 * after processing ASCONFs, we may have new 779 * transports created. 780 */ 781 if (transport && 782 sctp_cmp_addr_exact(&chunk->dest, 783 &transport->ipaddr)) 784 new_transport = transport; 785 else 786 new_transport = sctp_assoc_lookup_paddr(asoc, 787 &chunk->dest); 788 789 /* if we still don't have a new transport, then 790 * use the current active path. 791 */ 792 if (!new_transport) 793 new_transport = asoc->peer.active_path; 794 } else if ((new_transport->state == SCTP_INACTIVE) || 795 (new_transport->state == SCTP_UNCONFIRMED)) { 796 /* If the chunk is Heartbeat or Heartbeat Ack, 797 * send it to chunk->transport, even if it's 798 * inactive. 799 * 800 * 3.3.6 Heartbeat Acknowledgement: 801 * ... 802 * A HEARTBEAT ACK is always sent to the source IP 803 * address of the IP datagram containing the 804 * HEARTBEAT chunk to which this ack is responding. 805 * ... 806 * 807 * ASCONF_ACKs also must be sent to the source. 808 */ 809 if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && 810 chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && 811 chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) 812 new_transport = asoc->peer.active_path; 813 } 814 815 /* Are we switching transports? 816 * Take care of transport locks. 817 */ 818 if (new_transport != transport) { 819 transport = new_transport; 820 if (list_empty(&transport->send_ready)) { 821 list_add_tail(&transport->send_ready, 822 &transport_list); 823 } 824 packet = &transport->packet; 825 sctp_packet_config(packet, vtag, 826 asoc->peer.ecn_capable); 827 } 828 829 switch (chunk->chunk_hdr->type) { 830 /* 831 * 6.10 Bundling 832 * ... 833 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN 834 * COMPLETE with any other chunks. [Send them immediately.] 835 */ 836 case SCTP_CID_INIT: 837 case SCTP_CID_INIT_ACK: 838 case SCTP_CID_SHUTDOWN_COMPLETE: 839 sctp_packet_init(&singleton, transport, sport, dport); 840 sctp_packet_config(&singleton, vtag, 0); 841 sctp_packet_append_chunk(&singleton, chunk); 842 error = sctp_packet_transmit(&singleton); 843 if (error < 0) 844 return error; 845 break; 846 847 case SCTP_CID_ABORT: 848 if (sctp_test_T_bit(chunk)) { 849 packet->vtag = asoc->c.my_vtag; 850 } 851 /* The following chunks are "response" chunks, i.e. 852 * they are generated in response to something we 853 * received. If we are sending these, then we can 854 * send only 1 packet containing these chunks. 855 */ 856 case SCTP_CID_HEARTBEAT_ACK: 857 case SCTP_CID_SHUTDOWN_ACK: 858 case SCTP_CID_COOKIE_ACK: 859 case SCTP_CID_COOKIE_ECHO: 860 case SCTP_CID_ERROR: 861 case SCTP_CID_ECN_CWR: 862 case SCTP_CID_ASCONF_ACK: 863 one_packet = 1; 864 /* Fall through */ 865 866 case SCTP_CID_SACK: 867 case SCTP_CID_HEARTBEAT: 868 case SCTP_CID_SHUTDOWN: 869 case SCTP_CID_ECN_ECNE: 870 case SCTP_CID_ASCONF: 871 case SCTP_CID_FWD_TSN: 872 status = sctp_packet_transmit_chunk(packet, chunk, 873 one_packet); 874 if (status != SCTP_XMIT_OK) { 875 /* put the chunk back */ 876 list_add(&chunk->list, &q->control_chunk_list); 877 } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { 878 /* PR-SCTP C5) If a FORWARD TSN is sent, the 879 * sender MUST assure that at least one T3-rtx 880 * timer is running. 881 */ 882 sctp_transport_reset_timers(transport); 883 } 884 break; 885 886 default: 887 /* We built a chunk with an illegal type! */ 888 BUG(); 889 } 890 } 891 892 if (q->asoc->src_out_of_asoc_ok) 893 goto sctp_flush_out; 894 895 /* Is it OK to send data chunks? */ 896 switch (asoc->state) { 897 case SCTP_STATE_COOKIE_ECHOED: 898 /* Only allow bundling when this packet has a COOKIE-ECHO 899 * chunk. 900 */ 901 if (!packet || !packet->has_cookie_echo) 902 break; 903 904 /* fallthru */ 905 case SCTP_STATE_ESTABLISHED: 906 case SCTP_STATE_SHUTDOWN_PENDING: 907 case SCTP_STATE_SHUTDOWN_RECEIVED: 908 /* 909 * RFC 2960 6.1 Transmission of DATA Chunks 910 * 911 * C) When the time comes for the sender to transmit, 912 * before sending new DATA chunks, the sender MUST 913 * first transmit any outstanding DATA chunks which 914 * are marked for retransmission (limited by the 915 * current cwnd). 916 */ 917 if (!list_empty(&q->retransmit)) { 918 if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) 919 goto sctp_flush_out; 920 if (transport == asoc->peer.retran_path) 921 goto retran; 922 923 /* Switch transports & prepare the packet. */ 924 925 transport = asoc->peer.retran_path; 926 927 if (list_empty(&transport->send_ready)) { 928 list_add_tail(&transport->send_ready, 929 &transport_list); 930 } 931 932 packet = &transport->packet; 933 sctp_packet_config(packet, vtag, 934 asoc->peer.ecn_capable); 935 retran: 936 error = sctp_outq_flush_rtx(q, packet, 937 rtx_timeout, &start_timer); 938 939 if (start_timer) 940 sctp_transport_reset_timers(transport); 941 942 /* This can happen on COOKIE-ECHO resend. Only 943 * one chunk can get bundled with a COOKIE-ECHO. 944 */ 945 if (packet->has_cookie_echo) 946 goto sctp_flush_out; 947 948 /* Don't send new data if there is still data 949 * waiting to retransmit. 950 */ 951 if (!list_empty(&q->retransmit)) 952 goto sctp_flush_out; 953 } 954 955 /* Apply Max.Burst limitation to the current transport in 956 * case it will be used for new data. We are going to 957 * rest it before we return, but we want to apply the limit 958 * to the currently queued data. 959 */ 960 if (transport) 961 sctp_transport_burst_limited(transport); 962 963 /* Finally, transmit new packets. */ 964 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 965 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 966 * stream identifier. 967 */ 968 if (chunk->sinfo.sinfo_stream >= 969 asoc->c.sinit_num_ostreams) { 970 971 /* Mark as failed send. */ 972 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 973 sctp_chunk_free(chunk); 974 continue; 975 } 976 977 /* Has this chunk expired? */ 978 if (sctp_chunk_abandoned(chunk)) { 979 sctp_chunk_fail(chunk, 0); 980 sctp_chunk_free(chunk); 981 continue; 982 } 983 984 /* If there is a specified transport, use it. 985 * Otherwise, we want to use the active path. 986 */ 987 new_transport = chunk->transport; 988 if (!new_transport || 989 ((new_transport->state == SCTP_INACTIVE) || 990 (new_transport->state == SCTP_UNCONFIRMED))) 991 new_transport = asoc->peer.active_path; 992 if (new_transport->state == SCTP_UNCONFIRMED) 993 continue; 994 995 /* Change packets if necessary. */ 996 if (new_transport != transport) { 997 transport = new_transport; 998 999 /* Schedule to have this transport's 1000 * packet flushed. 1001 */ 1002 if (list_empty(&transport->send_ready)) { 1003 list_add_tail(&transport->send_ready, 1004 &transport_list); 1005 } 1006 1007 packet = &transport->packet; 1008 sctp_packet_config(packet, vtag, 1009 asoc->peer.ecn_capable); 1010 /* We've switched transports, so apply the 1011 * Burst limit to the new transport. 1012 */ 1013 sctp_transport_burst_limited(transport); 1014 } 1015 1016 SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", 1017 q, chunk, 1018 chunk && chunk->chunk_hdr ? 1019 sctp_cname(SCTP_ST_CHUNK( 1020 chunk->chunk_hdr->type)) 1021 : "Illegal Chunk"); 1022 1023 SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " 1024 "%p skb->users %d.\n", 1025 ntohl(chunk->subh.data_hdr->tsn), 1026 chunk->skb ?chunk->skb->head : NULL, 1027 chunk->skb ? 1028 atomic_read(&chunk->skb->users) : -1); 1029 1030 /* Add the chunk to the packet. */ 1031 status = sctp_packet_transmit_chunk(packet, chunk, 0); 1032 1033 switch (status) { 1034 case SCTP_XMIT_PMTU_FULL: 1035 case SCTP_XMIT_RWND_FULL: 1036 case SCTP_XMIT_NAGLE_DELAY: 1037 /* We could not append this chunk, so put 1038 * the chunk back on the output queue. 1039 */ 1040 SCTP_DEBUG_PRINTK("sctp_outq_flush: could " 1041 "not transmit TSN: 0x%x, status: %d\n", 1042 ntohl(chunk->subh.data_hdr->tsn), 1043 status); 1044 sctp_outq_head_data(q, chunk); 1045 goto sctp_flush_out; 1046 break; 1047 1048 case SCTP_XMIT_OK: 1049 /* The sender is in the SHUTDOWN-PENDING state, 1050 * The sender MAY set the I-bit in the DATA 1051 * chunk header. 1052 */ 1053 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) 1054 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; 1055 1056 break; 1057 1058 default: 1059 BUG(); 1060 } 1061 1062 /* BUG: We assume that the sctp_packet_transmit() 1063 * call below will succeed all the time and add the 1064 * chunk to the transmitted list and restart the 1065 * timers. 1066 * It is possible that the call can fail under OOM 1067 * conditions. 1068 * 1069 * Is this really a problem? Won't this behave 1070 * like a lost TSN? 1071 */ 1072 list_add_tail(&chunk->transmitted_list, 1073 &transport->transmitted); 1074 1075 sctp_transport_reset_timers(transport); 1076 1077 q->empty = 0; 1078 1079 /* Only let one DATA chunk get bundled with a 1080 * COOKIE-ECHO chunk. 1081 */ 1082 if (packet->has_cookie_echo) 1083 goto sctp_flush_out; 1084 } 1085 break; 1086 1087 default: 1088 /* Do nothing. */ 1089 break; 1090 } 1091 1092 sctp_flush_out: 1093 1094 /* Before returning, examine all the transports touched in 1095 * this call. Right now, we bluntly force clear all the 1096 * transports. Things might change after we implement Nagle. 1097 * But such an examination is still required. 1098 * 1099 * --xguo 1100 */ 1101 while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { 1102 struct sctp_transport *t = list_entry(ltransport, 1103 struct sctp_transport, 1104 send_ready); 1105 packet = &t->packet; 1106 if (!sctp_packet_empty(packet)) 1107 error = sctp_packet_transmit(packet); 1108 1109 /* Clear the burst limited state, if any */ 1110 sctp_transport_burst_reset(t); 1111 } 1112 1113 return error; 1114 } 1115 1116 /* Update unack_data based on the incoming SACK chunk */ 1117 static void sctp_sack_update_unack_data(struct sctp_association *assoc, 1118 struct sctp_sackhdr *sack) 1119 { 1120 sctp_sack_variable_t *frags; 1121 __u16 unack_data; 1122 int i; 1123 1124 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; 1125 1126 frags = sack->variable; 1127 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { 1128 unack_data -= ((ntohs(frags[i].gab.end) - 1129 ntohs(frags[i].gab.start) + 1)); 1130 } 1131 1132 assoc->unack_data = unack_data; 1133 } 1134 1135 /* This is where we REALLY process a SACK. 1136 * 1137 * Process the SACK against the outqueue. Mostly, this just frees 1138 * things off the transmitted queue. 1139 */ 1140 int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) 1141 { 1142 struct sctp_association *asoc = q->asoc; 1143 struct sctp_transport *transport; 1144 struct sctp_chunk *tchunk = NULL; 1145 struct list_head *lchunk, *transport_list, *temp; 1146 sctp_sack_variable_t *frags = sack->variable; 1147 __u32 sack_ctsn, ctsn, tsn; 1148 __u32 highest_tsn, highest_new_tsn; 1149 __u32 sack_a_rwnd; 1150 unsigned outstanding; 1151 struct sctp_transport *primary = asoc->peer.primary_path; 1152 int count_of_newacks = 0; 1153 int gap_ack_blocks; 1154 u8 accum_moved = 0; 1155 1156 /* Grab the association's destination address list. */ 1157 transport_list = &asoc->peer.transport_addr_list; 1158 1159 sack_ctsn = ntohl(sack->cum_tsn_ack); 1160 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); 1161 /* 1162 * SFR-CACC algorithm: 1163 * On receipt of a SACK the sender SHOULD execute the 1164 * following statements. 1165 * 1166 * 1) If the cumulative ack in the SACK passes next tsn_at_change 1167 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be 1168 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for 1169 * all destinations. 1170 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE 1171 * is set the receiver of the SACK MUST take the following actions: 1172 * 1173 * A) Initialize the cacc_saw_newack to 0 for all destination 1174 * addresses. 1175 * 1176 * Only bother if changeover_active is set. Otherwise, this is 1177 * totally suboptimal to do on every SACK. 1178 */ 1179 if (primary->cacc.changeover_active) { 1180 u8 clear_cycling = 0; 1181 1182 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { 1183 primary->cacc.changeover_active = 0; 1184 clear_cycling = 1; 1185 } 1186 1187 if (clear_cycling || gap_ack_blocks) { 1188 list_for_each_entry(transport, transport_list, 1189 transports) { 1190 if (clear_cycling) 1191 transport->cacc.cycling_changeover = 0; 1192 if (gap_ack_blocks) 1193 transport->cacc.cacc_saw_newack = 0; 1194 } 1195 } 1196 } 1197 1198 /* Get the highest TSN in the sack. */ 1199 highest_tsn = sack_ctsn; 1200 if (gap_ack_blocks) 1201 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); 1202 1203 if (TSN_lt(asoc->highest_sacked, highest_tsn)) 1204 asoc->highest_sacked = highest_tsn; 1205 1206 highest_new_tsn = sack_ctsn; 1207 1208 /* Run through the retransmit queue. Credit bytes received 1209 * and free those chunks that we can. 1210 */ 1211 sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn); 1212 1213 /* Run through the transmitted queue. 1214 * Credit bytes received and free those chunks which we can. 1215 * 1216 * This is a MASSIVE candidate for optimization. 1217 */ 1218 list_for_each_entry(transport, transport_list, transports) { 1219 sctp_check_transmitted(q, &transport->transmitted, 1220 transport, sack, &highest_new_tsn); 1221 /* 1222 * SFR-CACC algorithm: 1223 * C) Let count_of_newacks be the number of 1224 * destinations for which cacc_saw_newack is set. 1225 */ 1226 if (transport->cacc.cacc_saw_newack) 1227 count_of_newacks ++; 1228 } 1229 1230 /* Move the Cumulative TSN Ack Point if appropriate. */ 1231 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) { 1232 asoc->ctsn_ack_point = sack_ctsn; 1233 accum_moved = 1; 1234 } 1235 1236 if (gap_ack_blocks) { 1237 1238 if (asoc->fast_recovery && accum_moved) 1239 highest_new_tsn = highest_tsn; 1240 1241 list_for_each_entry(transport, transport_list, transports) 1242 sctp_mark_missing(q, &transport->transmitted, transport, 1243 highest_new_tsn, count_of_newacks); 1244 } 1245 1246 /* Update unack_data field in the assoc. */ 1247 sctp_sack_update_unack_data(asoc, sack); 1248 1249 ctsn = asoc->ctsn_ack_point; 1250 1251 /* Throw away stuff rotting on the sack queue. */ 1252 list_for_each_safe(lchunk, temp, &q->sacked) { 1253 tchunk = list_entry(lchunk, struct sctp_chunk, 1254 transmitted_list); 1255 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1256 if (TSN_lte(tsn, ctsn)) { 1257 list_del_init(&tchunk->transmitted_list); 1258 sctp_chunk_free(tchunk); 1259 } 1260 } 1261 1262 /* ii) Set rwnd equal to the newly received a_rwnd minus the 1263 * number of bytes still outstanding after processing the 1264 * Cumulative TSN Ack and the Gap Ack Blocks. 1265 */ 1266 1267 sack_a_rwnd = ntohl(sack->a_rwnd); 1268 outstanding = q->outstanding_bytes; 1269 1270 if (outstanding < sack_a_rwnd) 1271 sack_a_rwnd -= outstanding; 1272 else 1273 sack_a_rwnd = 0; 1274 1275 asoc->peer.rwnd = sack_a_rwnd; 1276 1277 sctp_generate_fwdtsn(q, sack_ctsn); 1278 1279 SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", 1280 __func__, sack_ctsn); 1281 SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " 1282 "%p is 0x%x. Adv peer ack point: 0x%x\n", 1283 __func__, asoc, ctsn, asoc->adv_peer_ack_point); 1284 1285 /* See if all chunks are acked. 1286 * Make sure the empty queue handler will get run later. 1287 */ 1288 q->empty = (list_empty(&q->out_chunk_list) && 1289 list_empty(&q->retransmit)); 1290 if (!q->empty) 1291 goto finish; 1292 1293 list_for_each_entry(transport, transport_list, transports) { 1294 q->empty = q->empty && list_empty(&transport->transmitted); 1295 if (!q->empty) 1296 goto finish; 1297 } 1298 1299 SCTP_DEBUG_PRINTK("sack queue is empty.\n"); 1300 finish: 1301 return q->empty; 1302 } 1303 1304 /* Is the outqueue empty? */ 1305 int sctp_outq_is_empty(const struct sctp_outq *q) 1306 { 1307 return q->empty; 1308 } 1309 1310 /******************************************************************** 1311 * 2nd Level Abstractions 1312 ********************************************************************/ 1313 1314 /* Go through a transport's transmitted list or the association's retransmit 1315 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked. 1316 * The retransmit list will not have an associated transport. 1317 * 1318 * I added coherent debug information output. --xguo 1319 * 1320 * Instead of printing 'sacked' or 'kept' for each TSN on the 1321 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. 1322 * KEPT TSN6-TSN7, etc. 1323 */ 1324 static void sctp_check_transmitted(struct sctp_outq *q, 1325 struct list_head *transmitted_queue, 1326 struct sctp_transport *transport, 1327 struct sctp_sackhdr *sack, 1328 __u32 *highest_new_tsn_in_sack) 1329 { 1330 struct list_head *lchunk; 1331 struct sctp_chunk *tchunk; 1332 struct list_head tlist; 1333 __u32 tsn; 1334 __u32 sack_ctsn; 1335 __u32 rtt; 1336 __u8 restart_timer = 0; 1337 int bytes_acked = 0; 1338 int migrate_bytes = 0; 1339 1340 /* These state variables are for coherent debug output. --xguo */ 1341 1342 #if SCTP_DEBUG 1343 __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */ 1344 __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */ 1345 __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */ 1346 __u32 dbg_last_kept_tsn = 0; /* ...and finishes here. */ 1347 1348 /* 0 : The last TSN was ACKed. 1349 * 1 : The last TSN was NOT ACKed (i.e. KEPT). 1350 * -1: We need to initialize. 1351 */ 1352 int dbg_prt_state = -1; 1353 #endif /* SCTP_DEBUG */ 1354 1355 sack_ctsn = ntohl(sack->cum_tsn_ack); 1356 1357 INIT_LIST_HEAD(&tlist); 1358 1359 /* The while loop will skip empty transmitted queues. */ 1360 while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { 1361 tchunk = list_entry(lchunk, struct sctp_chunk, 1362 transmitted_list); 1363 1364 if (sctp_chunk_abandoned(tchunk)) { 1365 /* Move the chunk to abandoned list. */ 1366 sctp_insert_list(&q->abandoned, lchunk); 1367 1368 /* If this chunk has not been acked, stop 1369 * considering it as 'outstanding'. 1370 */ 1371 if (!tchunk->tsn_gap_acked) { 1372 if (tchunk->transport) 1373 tchunk->transport->flight_size -= 1374 sctp_data_size(tchunk); 1375 q->outstanding_bytes -= sctp_data_size(tchunk); 1376 } 1377 continue; 1378 } 1379 1380 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1381 if (sctp_acked(sack, tsn)) { 1382 /* If this queue is the retransmit queue, the 1383 * retransmit timer has already reclaimed 1384 * the outstanding bytes for this chunk, so only 1385 * count bytes associated with a transport. 1386 */ 1387 if (transport) { 1388 /* If this chunk is being used for RTT 1389 * measurement, calculate the RTT and update 1390 * the RTO using this value. 1391 * 1392 * 6.3.1 C5) Karn's algorithm: RTT measurements 1393 * MUST NOT be made using packets that were 1394 * retransmitted (and thus for which it is 1395 * ambiguous whether the reply was for the 1396 * first instance of the packet or a later 1397 * instance). 1398 */ 1399 if (!tchunk->tsn_gap_acked && 1400 tchunk->rtt_in_progress) { 1401 tchunk->rtt_in_progress = 0; 1402 rtt = jiffies - tchunk->sent_at; 1403 sctp_transport_update_rto(transport, 1404 rtt); 1405 } 1406 } 1407 1408 /* If the chunk hasn't been marked as ACKED, 1409 * mark it and account bytes_acked if the 1410 * chunk had a valid transport (it will not 1411 * have a transport if ASCONF had deleted it 1412 * while DATA was outstanding). 1413 */ 1414 if (!tchunk->tsn_gap_acked) { 1415 tchunk->tsn_gap_acked = 1; 1416 *highest_new_tsn_in_sack = tsn; 1417 bytes_acked += sctp_data_size(tchunk); 1418 if (!tchunk->transport) 1419 migrate_bytes += sctp_data_size(tchunk); 1420 } 1421 1422 if (TSN_lte(tsn, sack_ctsn)) { 1423 /* RFC 2960 6.3.2 Retransmission Timer Rules 1424 * 1425 * R3) Whenever a SACK is received 1426 * that acknowledges the DATA chunk 1427 * with the earliest outstanding TSN 1428 * for that address, restart T3-rtx 1429 * timer for that address with its 1430 * current RTO. 1431 */ 1432 restart_timer = 1; 1433 1434 if (!tchunk->tsn_gap_acked) { 1435 /* 1436 * SFR-CACC algorithm: 1437 * 2) If the SACK contains gap acks 1438 * and the flag CHANGEOVER_ACTIVE is 1439 * set the receiver of the SACK MUST 1440 * take the following action: 1441 * 1442 * B) For each TSN t being acked that 1443 * has not been acked in any SACK so 1444 * far, set cacc_saw_newack to 1 for 1445 * the destination that the TSN was 1446 * sent to. 1447 */ 1448 if (transport && 1449 sack->num_gap_ack_blocks && 1450 q->asoc->peer.primary_path->cacc. 1451 changeover_active) 1452 transport->cacc.cacc_saw_newack 1453 = 1; 1454 } 1455 1456 list_add_tail(&tchunk->transmitted_list, 1457 &q->sacked); 1458 } else { 1459 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 1460 * M2) Each time a SACK arrives reporting 1461 * 'Stray DATA chunk(s)' record the highest TSN 1462 * reported as newly acknowledged, call this 1463 * value 'HighestTSNinSack'. A newly 1464 * acknowledged DATA chunk is one not 1465 * previously acknowledged in a SACK. 1466 * 1467 * When the SCTP sender of data receives a SACK 1468 * chunk that acknowledges, for the first time, 1469 * the receipt of a DATA chunk, all the still 1470 * unacknowledged DATA chunks whose TSN is 1471 * older than that newly acknowledged DATA 1472 * chunk, are qualified as 'Stray DATA chunks'. 1473 */ 1474 list_add_tail(lchunk, &tlist); 1475 } 1476 1477 #if SCTP_DEBUG 1478 switch (dbg_prt_state) { 1479 case 0: /* last TSN was ACKed */ 1480 if (dbg_last_ack_tsn + 1 == tsn) { 1481 /* This TSN belongs to the 1482 * current ACK range. 1483 */ 1484 break; 1485 } 1486 1487 if (dbg_last_ack_tsn != dbg_ack_tsn) { 1488 /* Display the end of the 1489 * current range. 1490 */ 1491 SCTP_DEBUG_PRINTK_CONT("-%08x", 1492 dbg_last_ack_tsn); 1493 } 1494 1495 /* Start a new range. */ 1496 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); 1497 dbg_ack_tsn = tsn; 1498 break; 1499 1500 case 1: /* The last TSN was NOT ACKed. */ 1501 if (dbg_last_kept_tsn != dbg_kept_tsn) { 1502 /* Display the end of current range. */ 1503 SCTP_DEBUG_PRINTK_CONT("-%08x", 1504 dbg_last_kept_tsn); 1505 } 1506 1507 SCTP_DEBUG_PRINTK_CONT("\n"); 1508 1509 /* FALL THROUGH... */ 1510 default: 1511 /* This is the first-ever TSN we examined. */ 1512 /* Start a new range of ACK-ed TSNs. */ 1513 SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); 1514 dbg_prt_state = 0; 1515 dbg_ack_tsn = tsn; 1516 } 1517 1518 dbg_last_ack_tsn = tsn; 1519 #endif /* SCTP_DEBUG */ 1520 1521 } else { 1522 if (tchunk->tsn_gap_acked) { 1523 SCTP_DEBUG_PRINTK("%s: Receiver reneged on " 1524 "data TSN: 0x%x\n", 1525 __func__, 1526 tsn); 1527 tchunk->tsn_gap_acked = 0; 1528 1529 if (tchunk->transport) 1530 bytes_acked -= sctp_data_size(tchunk); 1531 1532 /* RFC 2960 6.3.2 Retransmission Timer Rules 1533 * 1534 * R4) Whenever a SACK is received missing a 1535 * TSN that was previously acknowledged via a 1536 * Gap Ack Block, start T3-rtx for the 1537 * destination address to which the DATA 1538 * chunk was originally 1539 * transmitted if it is not already running. 1540 */ 1541 restart_timer = 1; 1542 } 1543 1544 list_add_tail(lchunk, &tlist); 1545 1546 #if SCTP_DEBUG 1547 /* See the above comments on ACK-ed TSNs. */ 1548 switch (dbg_prt_state) { 1549 case 1: 1550 if (dbg_last_kept_tsn + 1 == tsn) 1551 break; 1552 1553 if (dbg_last_kept_tsn != dbg_kept_tsn) 1554 SCTP_DEBUG_PRINTK_CONT("-%08x", 1555 dbg_last_kept_tsn); 1556 1557 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); 1558 dbg_kept_tsn = tsn; 1559 break; 1560 1561 case 0: 1562 if (dbg_last_ack_tsn != dbg_ack_tsn) 1563 SCTP_DEBUG_PRINTK_CONT("-%08x", 1564 dbg_last_ack_tsn); 1565 SCTP_DEBUG_PRINTK_CONT("\n"); 1566 1567 /* FALL THROUGH... */ 1568 default: 1569 SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); 1570 dbg_prt_state = 1; 1571 dbg_kept_tsn = tsn; 1572 } 1573 1574 dbg_last_kept_tsn = tsn; 1575 #endif /* SCTP_DEBUG */ 1576 } 1577 } 1578 1579 #if SCTP_DEBUG 1580 /* Finish off the last range, displaying its ending TSN. */ 1581 switch (dbg_prt_state) { 1582 case 0: 1583 if (dbg_last_ack_tsn != dbg_ack_tsn) { 1584 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn); 1585 } else { 1586 SCTP_DEBUG_PRINTK_CONT("\n"); 1587 } 1588 break; 1589 1590 case 1: 1591 if (dbg_last_kept_tsn != dbg_kept_tsn) { 1592 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn); 1593 } else { 1594 SCTP_DEBUG_PRINTK_CONT("\n"); 1595 } 1596 } 1597 #endif /* SCTP_DEBUG */ 1598 if (transport) { 1599 if (bytes_acked) { 1600 struct sctp_association *asoc = transport->asoc; 1601 1602 /* We may have counted DATA that was migrated 1603 * to this transport due to DEL-IP operation. 1604 * Subtract those bytes, since the were never 1605 * send on this transport and shouldn't be 1606 * credited to this transport. 1607 */ 1608 bytes_acked -= migrate_bytes; 1609 1610 /* 8.2. When an outstanding TSN is acknowledged, 1611 * the endpoint shall clear the error counter of 1612 * the destination transport address to which the 1613 * DATA chunk was last sent. 1614 * The association's overall error counter is 1615 * also cleared. 1616 */ 1617 transport->error_count = 0; 1618 transport->asoc->overall_error_count = 0; 1619 1620 /* 1621 * While in SHUTDOWN PENDING, we may have started 1622 * the T5 shutdown guard timer after reaching the 1623 * retransmission limit. Stop that timer as soon 1624 * as the receiver acknowledged any data. 1625 */ 1626 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && 1627 del_timer(&asoc->timers 1628 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) 1629 sctp_association_put(asoc); 1630 1631 /* Mark the destination transport address as 1632 * active if it is not so marked. 1633 */ 1634 if ((transport->state == SCTP_INACTIVE) || 1635 (transport->state == SCTP_UNCONFIRMED)) { 1636 sctp_assoc_control_transport( 1637 transport->asoc, 1638 transport, 1639 SCTP_TRANSPORT_UP, 1640 SCTP_RECEIVED_SACK); 1641 } 1642 1643 sctp_transport_raise_cwnd(transport, sack_ctsn, 1644 bytes_acked); 1645 1646 transport->flight_size -= bytes_acked; 1647 if (transport->flight_size == 0) 1648 transport->partial_bytes_acked = 0; 1649 q->outstanding_bytes -= bytes_acked + migrate_bytes; 1650 } else { 1651 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1652 * When a sender is doing zero window probing, it 1653 * should not timeout the association if it continues 1654 * to receive new packets from the receiver. The 1655 * reason is that the receiver MAY keep its window 1656 * closed for an indefinite time. 1657 * A sender is doing zero window probing when the 1658 * receiver's advertised window is zero, and there is 1659 * only one data chunk in flight to the receiver. 1660 * 1661 * Allow the association to timeout while in SHUTDOWN 1662 * PENDING or SHUTDOWN RECEIVED in case the receiver 1663 * stays in zero window mode forever. 1664 */ 1665 if (!q->asoc->peer.rwnd && 1666 !list_empty(&tlist) && 1667 (sack_ctsn+2 == q->asoc->next_tsn) && 1668 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { 1669 SCTP_DEBUG_PRINTK("%s: SACK received for zero " 1670 "window probe: %u\n", 1671 __func__, sack_ctsn); 1672 q->asoc->overall_error_count = 0; 1673 transport->error_count = 0; 1674 } 1675 } 1676 1677 /* RFC 2960 6.3.2 Retransmission Timer Rules 1678 * 1679 * R2) Whenever all outstanding data sent to an address have 1680 * been acknowledged, turn off the T3-rtx timer of that 1681 * address. 1682 */ 1683 if (!transport->flight_size) { 1684 if (timer_pending(&transport->T3_rtx_timer) && 1685 del_timer(&transport->T3_rtx_timer)) { 1686 sctp_transport_put(transport); 1687 } 1688 } else if (restart_timer) { 1689 if (!mod_timer(&transport->T3_rtx_timer, 1690 jiffies + transport->rto)) 1691 sctp_transport_hold(transport); 1692 } 1693 } 1694 1695 list_splice(&tlist, transmitted_queue); 1696 } 1697 1698 /* Mark chunks as missing and consequently may get retransmitted. */ 1699 static void sctp_mark_missing(struct sctp_outq *q, 1700 struct list_head *transmitted_queue, 1701 struct sctp_transport *transport, 1702 __u32 highest_new_tsn_in_sack, 1703 int count_of_newacks) 1704 { 1705 struct sctp_chunk *chunk; 1706 __u32 tsn; 1707 char do_fast_retransmit = 0; 1708 struct sctp_association *asoc = q->asoc; 1709 struct sctp_transport *primary = asoc->peer.primary_path; 1710 1711 list_for_each_entry(chunk, transmitted_queue, transmitted_list) { 1712 1713 tsn = ntohl(chunk->subh.data_hdr->tsn); 1714 1715 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all 1716 * 'Unacknowledged TSN's', if the TSN number of an 1717 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' 1718 * value, increment the 'TSN.Missing.Report' count on that 1719 * chunk if it has NOT been fast retransmitted or marked for 1720 * fast retransmit already. 1721 */ 1722 if (chunk->fast_retransmit == SCTP_CAN_FRTX && 1723 !chunk->tsn_gap_acked && 1724 TSN_lt(tsn, highest_new_tsn_in_sack)) { 1725 1726 /* SFR-CACC may require us to skip marking 1727 * this chunk as missing. 1728 */ 1729 if (!transport || !sctp_cacc_skip(primary, 1730 chunk->transport, 1731 count_of_newacks, tsn)) { 1732 chunk->tsn_missing_report++; 1733 1734 SCTP_DEBUG_PRINTK( 1735 "%s: TSN 0x%x missing counter: %d\n", 1736 __func__, tsn, 1737 chunk->tsn_missing_report); 1738 } 1739 } 1740 /* 1741 * M4) If any DATA chunk is found to have a 1742 * 'TSN.Missing.Report' 1743 * value larger than or equal to 3, mark that chunk for 1744 * retransmission and start the fast retransmit procedure. 1745 */ 1746 1747 if (chunk->tsn_missing_report >= 3) { 1748 chunk->fast_retransmit = SCTP_NEED_FRTX; 1749 do_fast_retransmit = 1; 1750 } 1751 } 1752 1753 if (transport) { 1754 if (do_fast_retransmit) 1755 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); 1756 1757 SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " 1758 "ssthresh: %d, flight_size: %d, pba: %d\n", 1759 __func__, transport, transport->cwnd, 1760 transport->ssthresh, transport->flight_size, 1761 transport->partial_bytes_acked); 1762 } 1763 } 1764 1765 /* Is the given TSN acked by this packet? */ 1766 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) 1767 { 1768 int i; 1769 sctp_sack_variable_t *frags; 1770 __u16 gap; 1771 __u32 ctsn = ntohl(sack->cum_tsn_ack); 1772 1773 if (TSN_lte(tsn, ctsn)) 1774 goto pass; 1775 1776 /* 3.3.4 Selective Acknowledgement (SACK) (3): 1777 * 1778 * Gap Ack Blocks: 1779 * These fields contain the Gap Ack Blocks. They are repeated 1780 * for each Gap Ack Block up to the number of Gap Ack Blocks 1781 * defined in the Number of Gap Ack Blocks field. All DATA 1782 * chunks with TSNs greater than or equal to (Cumulative TSN 1783 * Ack + Gap Ack Block Start) and less than or equal to 1784 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack 1785 * Block are assumed to have been received correctly. 1786 */ 1787 1788 frags = sack->variable; 1789 gap = tsn - ctsn; 1790 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { 1791 if (TSN_lte(ntohs(frags[i].gab.start), gap) && 1792 TSN_lte(gap, ntohs(frags[i].gab.end))) 1793 goto pass; 1794 } 1795 1796 return 0; 1797 pass: 1798 return 1; 1799 } 1800 1801 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, 1802 int nskips, __be16 stream) 1803 { 1804 int i; 1805 1806 for (i = 0; i < nskips; i++) { 1807 if (skiplist[i].stream == stream) 1808 return i; 1809 } 1810 return i; 1811 } 1812 1813 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ 1814 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) 1815 { 1816 struct sctp_association *asoc = q->asoc; 1817 struct sctp_chunk *ftsn_chunk = NULL; 1818 struct sctp_fwdtsn_skip ftsn_skip_arr[10]; 1819 int nskips = 0; 1820 int skip_pos = 0; 1821 __u32 tsn; 1822 struct sctp_chunk *chunk; 1823 struct list_head *lchunk, *temp; 1824 1825 if (!asoc->peer.prsctp_capable) 1826 return; 1827 1828 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the 1829 * received SACK. 1830 * 1831 * If (Advanced.Peer.Ack.Point < SackCumAck), then update 1832 * Advanced.Peer.Ack.Point to be equal to SackCumAck. 1833 */ 1834 if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) 1835 asoc->adv_peer_ack_point = ctsn; 1836 1837 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" 1838 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as 1839 * the chunk next in the out-queue space is marked as "abandoned" as 1840 * shown in the following example: 1841 * 1842 * Assuming that a SACK arrived with the Cumulative TSN ACK 102 1843 * and the Advanced.Peer.Ack.Point is updated to this value: 1844 * 1845 * out-queue at the end of ==> out-queue after Adv.Ack.Point 1846 * normal SACK processing local advancement 1847 * ... ... 1848 * Adv.Ack.Pt-> 102 acked 102 acked 1849 * 103 abandoned 103 abandoned 1850 * 104 abandoned Adv.Ack.P-> 104 abandoned 1851 * 105 105 1852 * 106 acked 106 acked 1853 * ... ... 1854 * 1855 * In this example, the data sender successfully advanced the 1856 * "Advanced.Peer.Ack.Point" from 102 to 104 locally. 1857 */ 1858 list_for_each_safe(lchunk, temp, &q->abandoned) { 1859 chunk = list_entry(lchunk, struct sctp_chunk, 1860 transmitted_list); 1861 tsn = ntohl(chunk->subh.data_hdr->tsn); 1862 1863 /* Remove any chunks in the abandoned queue that are acked by 1864 * the ctsn. 1865 */ 1866 if (TSN_lte(tsn, ctsn)) { 1867 list_del_init(lchunk); 1868 sctp_chunk_free(chunk); 1869 } else { 1870 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { 1871 asoc->adv_peer_ack_point = tsn; 1872 if (chunk->chunk_hdr->flags & 1873 SCTP_DATA_UNORDERED) 1874 continue; 1875 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], 1876 nskips, 1877 chunk->subh.data_hdr->stream); 1878 ftsn_skip_arr[skip_pos].stream = 1879 chunk->subh.data_hdr->stream; 1880 ftsn_skip_arr[skip_pos].ssn = 1881 chunk->subh.data_hdr->ssn; 1882 if (skip_pos == nskips) 1883 nskips++; 1884 if (nskips == 10) 1885 break; 1886 } else 1887 break; 1888 } 1889 } 1890 1891 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" 1892 * is greater than the Cumulative TSN ACK carried in the received 1893 * SACK, the data sender MUST send the data receiver a FORWARD TSN 1894 * chunk containing the latest value of the 1895 * "Advanced.Peer.Ack.Point". 1896 * 1897 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD 1898 * list each stream and sequence number in the forwarded TSN. This 1899 * information will enable the receiver to easily find any 1900 * stranded TSN's waiting on stream reorder queues. Each stream 1901 * SHOULD only be reported once; this means that if multiple 1902 * abandoned messages occur in the same stream then only the 1903 * highest abandoned stream sequence number is reported. If the 1904 * total size of the FORWARD TSN does NOT fit in a single MTU then 1905 * the sender of the FORWARD TSN SHOULD lower the 1906 * Advanced.Peer.Ack.Point to the last TSN that will fit in a 1907 * single MTU. 1908 */ 1909 if (asoc->adv_peer_ack_point > ctsn) 1910 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, 1911 nskips, &ftsn_skip_arr[0]); 1912 1913 if (ftsn_chunk) { 1914 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1915 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1916 } 1917 } 1918