1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * 7 * This file is part of the SCTP kernel implementation 8 * 9 * These functions implement the sctp_outq class. The outqueue handles 10 * bundling and queueing of outgoing SCTP chunks. 11 * 12 * This SCTP implementation is free software; 13 * you can redistribute it and/or modify it under the terms of 14 * the GNU General Public License as published by 15 * the Free Software Foundation; either version 2, or (at your option) 16 * any later version. 17 * 18 * This SCTP implementation is distributed in the hope that it 19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 20 * ************************ 21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 22 * See the GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with GNU CC; see the file COPYING. If not, write to 26 * the Free Software Foundation, 59 Temple Place - Suite 330, 27 * Boston, MA 02111-1307, USA. 28 * 29 * Please send any bug reports or fixes you make to the 30 * email address(es): 31 * lksctp developers <lksctp-developers@lists.sourceforge.net> 32 * 33 * Or submit a bug report through the following website: 34 * http://www.sf.net/projects/lksctp 35 * 36 * Written or modified by: 37 * La Monte H.P. Yarroll <piggy@acm.org> 38 * Karl Knutson <karl@athena.chicago.il.us> 39 * Perry Melange <pmelange@null.cc.uic.edu> 40 * Xingang Guo <xingang.guo@intel.com> 41 * Hui Huang <hui.huang@nokia.com> 42 * Sridhar Samudrala <sri@us.ibm.com> 43 * Jon Grimm <jgrimm@us.ibm.com> 44 * 45 * Any bugs reported given to us we will try to fix... any fixes shared will 46 * be incorporated into the next SCTP release. 47 */ 48 49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 50 51 #include <linux/types.h> 52 #include <linux/list.h> /* For struct list_head */ 53 #include <linux/socket.h> 54 #include <linux/ip.h> 55 #include <linux/slab.h> 56 #include <net/sock.h> /* For skb_set_owner_w */ 57 58 #include <net/sctp/sctp.h> 59 #include <net/sctp/sm.h> 60 61 /* Declare internal functions here. */ 62 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); 63 static void sctp_check_transmitted(struct sctp_outq *q, 64 struct list_head *transmitted_queue, 65 struct sctp_transport *transport, 66 struct sctp_sackhdr *sack, 67 __u32 *highest_new_tsn); 68 69 static void sctp_mark_missing(struct sctp_outq *q, 70 struct list_head *transmitted_queue, 71 struct sctp_transport *transport, 72 __u32 highest_new_tsn, 73 int count_of_newacks); 74 75 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); 76 77 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout); 78 79 /* Add data to the front of the queue. */ 80 static inline void sctp_outq_head_data(struct sctp_outq *q, 81 struct sctp_chunk *ch) 82 { 83 list_add(&ch->list, &q->out_chunk_list); 84 q->out_qlen += ch->skb->len; 85 } 86 87 /* Take data from the front of the queue. */ 88 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) 89 { 90 struct sctp_chunk *ch = NULL; 91 92 if (!list_empty(&q->out_chunk_list)) { 93 struct list_head *entry = q->out_chunk_list.next; 94 95 ch = list_entry(entry, struct sctp_chunk, list); 96 list_del_init(entry); 97 q->out_qlen -= ch->skb->len; 98 } 99 return ch; 100 } 101 /* Add data chunk to the end of the queue. */ 102 static inline void sctp_outq_tail_data(struct sctp_outq *q, 103 struct sctp_chunk *ch) 104 { 105 list_add_tail(&ch->list, &q->out_chunk_list); 106 q->out_qlen += ch->skb->len; 107 } 108 109 /* 110 * SFR-CACC algorithm: 111 * D) If count_of_newacks is greater than or equal to 2 112 * and t was not sent to the current primary then the 113 * sender MUST NOT increment missing report count for t. 114 */ 115 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, 116 struct sctp_transport *transport, 117 int count_of_newacks) 118 { 119 if (count_of_newacks >=2 && transport != primary) 120 return 1; 121 return 0; 122 } 123 124 /* 125 * SFR-CACC algorithm: 126 * F) If count_of_newacks is less than 2, let d be the 127 * destination to which t was sent. If cacc_saw_newack 128 * is 0 for destination d, then the sender MUST NOT 129 * increment missing report count for t. 130 */ 131 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 132 int count_of_newacks) 133 { 134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) 135 return 1; 136 return 0; 137 } 138 139 /* 140 * SFR-CACC algorithm: 141 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD 142 * execute steps C, D, F. 143 * 144 * C has been implemented in sctp_outq_sack 145 */ 146 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, 147 struct sctp_transport *transport, 148 int count_of_newacks) 149 { 150 if (!primary->cacc.cycling_changeover) { 151 if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) 152 return 1; 153 if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) 154 return 1; 155 return 0; 156 } 157 return 0; 158 } 159 160 /* 161 * SFR-CACC algorithm: 162 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less 163 * than next_tsn_at_change of the current primary, then 164 * the sender MUST NOT increment missing report count 165 * for t. 166 */ 167 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) 168 { 169 if (primary->cacc.cycling_changeover && 170 TSN_lt(tsn, primary->cacc.next_tsn_at_change)) 171 return 1; 172 return 0; 173 } 174 175 /* 176 * SFR-CACC algorithm: 177 * 3) If the missing report count for TSN t is to be 178 * incremented according to [RFC2960] and 179 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, 180 * then the sender MUST futher execute steps 3.1 and 181 * 3.2 to determine if the missing report count for 182 * TSN t SHOULD NOT be incremented. 183 * 184 * 3.3) If 3.1 and 3.2 do not dictate that the missing 185 * report count for t should not be incremented, then 186 * the sender SOULD increment missing report count for 187 * t (according to [RFC2960] and [SCTP_STEWART_2002]). 188 */ 189 static inline int sctp_cacc_skip(struct sctp_transport *primary, 190 struct sctp_transport *transport, 191 int count_of_newacks, 192 __u32 tsn) 193 { 194 if (primary->cacc.changeover_active && 195 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) || 196 sctp_cacc_skip_3_2(primary, tsn))) 197 return 1; 198 return 0; 199 } 200 201 /* Initialize an existing sctp_outq. This does the boring stuff. 202 * You still need to define handlers if you really want to DO 203 * something with this structure... 204 */ 205 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 206 { 207 q->asoc = asoc; 208 INIT_LIST_HEAD(&q->out_chunk_list); 209 INIT_LIST_HEAD(&q->control_chunk_list); 210 INIT_LIST_HEAD(&q->retransmit); 211 INIT_LIST_HEAD(&q->sacked); 212 INIT_LIST_HEAD(&q->abandoned); 213 214 q->fast_rtx = 0; 215 q->outstanding_bytes = 0; 216 q->empty = 1; 217 q->cork = 0; 218 219 q->malloced = 0; 220 q->out_qlen = 0; 221 } 222 223 /* Free the outqueue structure and any related pending chunks. 224 */ 225 void sctp_outq_teardown(struct sctp_outq *q) 226 { 227 struct sctp_transport *transport; 228 struct list_head *lchunk, *temp; 229 struct sctp_chunk *chunk, *tmp; 230 231 /* Throw away unacknowledged chunks. */ 232 list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, 233 transports) { 234 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { 235 chunk = list_entry(lchunk, struct sctp_chunk, 236 transmitted_list); 237 /* Mark as part of a failed message. */ 238 sctp_chunk_fail(chunk, q->error); 239 sctp_chunk_free(chunk); 240 } 241 } 242 243 /* Throw away chunks that have been gap ACKed. */ 244 list_for_each_safe(lchunk, temp, &q->sacked) { 245 list_del_init(lchunk); 246 chunk = list_entry(lchunk, struct sctp_chunk, 247 transmitted_list); 248 sctp_chunk_fail(chunk, q->error); 249 sctp_chunk_free(chunk); 250 } 251 252 /* Throw away any chunks in the retransmit queue. */ 253 list_for_each_safe(lchunk, temp, &q->retransmit) { 254 list_del_init(lchunk); 255 chunk = list_entry(lchunk, struct sctp_chunk, 256 transmitted_list); 257 sctp_chunk_fail(chunk, q->error); 258 sctp_chunk_free(chunk); 259 } 260 261 /* Throw away any chunks that are in the abandoned queue. */ 262 list_for_each_safe(lchunk, temp, &q->abandoned) { 263 list_del_init(lchunk); 264 chunk = list_entry(lchunk, struct sctp_chunk, 265 transmitted_list); 266 sctp_chunk_fail(chunk, q->error); 267 sctp_chunk_free(chunk); 268 } 269 270 /* Throw away any leftover data chunks. */ 271 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 272 273 /* Mark as send failure. */ 274 sctp_chunk_fail(chunk, q->error); 275 sctp_chunk_free(chunk); 276 } 277 278 q->error = 0; 279 280 /* Throw away any leftover control chunks. */ 281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 282 list_del_init(&chunk->list); 283 sctp_chunk_free(chunk); 284 } 285 } 286 287 /* Free the outqueue structure and any related pending chunks. */ 288 void sctp_outq_free(struct sctp_outq *q) 289 { 290 /* Throw away leftover chunks. */ 291 sctp_outq_teardown(q); 292 293 /* If we were kmalloc()'d, free the memory. */ 294 if (q->malloced) 295 kfree(q); 296 } 297 298 /* Put a new chunk in an sctp_outq. */ 299 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) 300 { 301 int error = 0; 302 303 SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", 304 q, chunk, chunk && chunk->chunk_hdr ? 305 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) 306 : "Illegal Chunk"); 307 308 /* If it is data, queue it up, otherwise, send it 309 * immediately. 310 */ 311 if (sctp_chunk_is_data(chunk)) { 312 /* Is it OK to queue data chunks? */ 313 /* From 9. Termination of Association 314 * 315 * When either endpoint performs a shutdown, the 316 * association on each peer will stop accepting new 317 * data from its user and only deliver data in queue 318 * at the time of sending or receiving the SHUTDOWN 319 * chunk. 320 */ 321 switch (q->asoc->state) { 322 case SCTP_STATE_EMPTY: 323 case SCTP_STATE_CLOSED: 324 case SCTP_STATE_SHUTDOWN_PENDING: 325 case SCTP_STATE_SHUTDOWN_SENT: 326 case SCTP_STATE_SHUTDOWN_RECEIVED: 327 case SCTP_STATE_SHUTDOWN_ACK_SENT: 328 /* Cannot send after transport endpoint shutdown */ 329 error = -ESHUTDOWN; 330 break; 331 332 default: 333 SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", 334 q, chunk, chunk && chunk->chunk_hdr ? 335 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) 336 : "Illegal Chunk"); 337 338 sctp_outq_tail_data(q, chunk); 339 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 340 SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS); 341 else 342 SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); 343 q->empty = 0; 344 break; 345 } 346 } else { 347 list_add_tail(&chunk->list, &q->control_chunk_list); 348 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 349 } 350 351 if (error < 0) 352 return error; 353 354 if (!q->cork) 355 error = sctp_outq_flush(q, 0); 356 357 return error; 358 } 359 360 /* Insert a chunk into the sorted list based on the TSNs. The retransmit list 361 * and the abandoned list are in ascending order. 362 */ 363 static void sctp_insert_list(struct list_head *head, struct list_head *new) 364 { 365 struct list_head *pos; 366 struct sctp_chunk *nchunk, *lchunk; 367 __u32 ntsn, ltsn; 368 int done = 0; 369 370 nchunk = list_entry(new, struct sctp_chunk, transmitted_list); 371 ntsn = ntohl(nchunk->subh.data_hdr->tsn); 372 373 list_for_each(pos, head) { 374 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); 375 ltsn = ntohl(lchunk->subh.data_hdr->tsn); 376 if (TSN_lt(ntsn, ltsn)) { 377 list_add(new, pos->prev); 378 done = 1; 379 break; 380 } 381 } 382 if (!done) 383 list_add_tail(new, head); 384 } 385 386 /* Mark all the eligible packets on a transport for retransmission. */ 387 void sctp_retransmit_mark(struct sctp_outq *q, 388 struct sctp_transport *transport, 389 __u8 reason) 390 { 391 struct list_head *lchunk, *ltemp; 392 struct sctp_chunk *chunk; 393 394 /* Walk through the specified transmitted queue. */ 395 list_for_each_safe(lchunk, ltemp, &transport->transmitted) { 396 chunk = list_entry(lchunk, struct sctp_chunk, 397 transmitted_list); 398 399 /* If the chunk is abandoned, move it to abandoned list. */ 400 if (sctp_chunk_abandoned(chunk)) { 401 list_del_init(lchunk); 402 sctp_insert_list(&q->abandoned, lchunk); 403 404 /* If this chunk has not been previousely acked, 405 * stop considering it 'outstanding'. Our peer 406 * will most likely never see it since it will 407 * not be retransmitted 408 */ 409 if (!chunk->tsn_gap_acked) { 410 if (chunk->transport) 411 chunk->transport->flight_size -= 412 sctp_data_size(chunk); 413 q->outstanding_bytes -= sctp_data_size(chunk); 414 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 415 sizeof(struct sk_buff)); 416 } 417 continue; 418 } 419 420 /* If we are doing retransmission due to a timeout or pmtu 421 * discovery, only the chunks that are not yet acked should 422 * be added to the retransmit queue. 423 */ 424 if ((reason == SCTP_RTXR_FAST_RTX && 425 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 426 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 427 /* RFC 2960 6.2.1 Processing a Received SACK 428 * 429 * C) Any time a DATA chunk is marked for 430 * retransmission (via either T3-rtx timer expiration 431 * (Section 6.3.3) or via fast retransmit 432 * (Section 7.2.4)), add the data size of those 433 * chunks to the rwnd. 434 */ 435 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 436 sizeof(struct sk_buff)); 437 q->outstanding_bytes -= sctp_data_size(chunk); 438 if (chunk->transport) 439 transport->flight_size -= sctp_data_size(chunk); 440 441 /* sctpimpguide-05 Section 2.8.2 442 * M5) If a T3-rtx timer expires, the 443 * 'TSN.Missing.Report' of all affected TSNs is set 444 * to 0. 445 */ 446 chunk->tsn_missing_report = 0; 447 448 /* If a chunk that is being used for RTT measurement 449 * has to be retransmitted, we cannot use this chunk 450 * anymore for RTT measurements. Reset rto_pending so 451 * that a new RTT measurement is started when a new 452 * data chunk is sent. 453 */ 454 if (chunk->rtt_in_progress) { 455 chunk->rtt_in_progress = 0; 456 transport->rto_pending = 0; 457 } 458 459 /* Move the chunk to the retransmit queue. The chunks 460 * on the retransmit queue are always kept in order. 461 */ 462 list_del_init(lchunk); 463 sctp_insert_list(&q->retransmit, lchunk); 464 } 465 } 466 467 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " 468 "cwnd: %d, ssthresh: %d, flight_size: %d, " 469 "pba: %d\n", __func__, 470 transport, reason, 471 transport->cwnd, transport->ssthresh, 472 transport->flight_size, 473 transport->partial_bytes_acked); 474 475 } 476 477 /* Mark all the eligible packets on a transport for retransmission and force 478 * one packet out. 479 */ 480 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 481 sctp_retransmit_reason_t reason) 482 { 483 int error = 0; 484 485 switch(reason) { 486 case SCTP_RTXR_T3_RTX: 487 SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS); 488 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); 489 /* Update the retran path if the T3-rtx timer has expired for 490 * the current retran path. 491 */ 492 if (transport == transport->asoc->peer.retran_path) 493 sctp_assoc_update_retran_path(transport->asoc); 494 transport->asoc->rtx_data_chunks += 495 transport->asoc->unack_data; 496 break; 497 case SCTP_RTXR_FAST_RTX: 498 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 499 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 500 q->fast_rtx = 1; 501 break; 502 case SCTP_RTXR_PMTUD: 503 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 504 break; 505 case SCTP_RTXR_T1_RTX: 506 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); 507 transport->asoc->init_retries++; 508 break; 509 default: 510 BUG(); 511 } 512 513 sctp_retransmit_mark(q, transport, reason); 514 515 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, 516 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 517 * following the procedures outlined in C1 - C5. 518 */ 519 if (reason == SCTP_RTXR_T3_RTX) 520 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 521 522 /* Flush the queues only on timeout, since fast_rtx is only 523 * triggered during sack processing and the queue 524 * will be flushed at the end. 525 */ 526 if (reason != SCTP_RTXR_FAST_RTX) 527 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 528 529 if (error) 530 q->asoc->base.sk->sk_err = -error; 531 } 532 533 /* 534 * Transmit DATA chunks on the retransmit queue. Upon return from 535 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which 536 * need to be transmitted by the caller. 537 * We assume that pkt->transport has already been set. 538 * 539 * The return value is a normal kernel error return value. 540 */ 541 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, 542 int rtx_timeout, int *start_timer) 543 { 544 struct list_head *lqueue; 545 struct sctp_transport *transport = pkt->transport; 546 sctp_xmit_t status; 547 struct sctp_chunk *chunk, *chunk1; 548 struct sctp_association *asoc; 549 int fast_rtx; 550 int error = 0; 551 int timer = 0; 552 int done = 0; 553 554 asoc = q->asoc; 555 lqueue = &q->retransmit; 556 fast_rtx = q->fast_rtx; 557 558 /* This loop handles time-out retransmissions, fast retransmissions, 559 * and retransmissions due to opening of whindow. 560 * 561 * RFC 2960 6.3.3 Handle T3-rtx Expiration 562 * 563 * E3) Determine how many of the earliest (i.e., lowest TSN) 564 * outstanding DATA chunks for the address for which the 565 * T3-rtx has expired will fit into a single packet, subject 566 * to the MTU constraint for the path corresponding to the 567 * destination transport address to which the retransmission 568 * is being sent (this may be different from the address for 569 * which the timer expires [see Section 6.4]). Call this value 570 * K. Bundle and retransmit those K DATA chunks in a single 571 * packet to the destination endpoint. 572 * 573 * [Just to be painfully clear, if we are retransmitting 574 * because a timeout just happened, we should send only ONE 575 * packet of retransmitted data.] 576 * 577 * For fast retransmissions we also send only ONE packet. However, 578 * if we are just flushing the queue due to open window, we'll 579 * try to send as much as possible. 580 */ 581 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 582 583 /* Make sure that Gap Acked TSNs are not retransmitted. A 584 * simple approach is just to move such TSNs out of the 585 * way and into a 'transmitted' queue and skip to the 586 * next chunk. 587 */ 588 if (chunk->tsn_gap_acked) { 589 list_del(&chunk->transmitted_list); 590 list_add_tail(&chunk->transmitted_list, 591 &transport->transmitted); 592 continue; 593 } 594 595 /* If we are doing fast retransmit, ignore non-fast_rtransmit 596 * chunks 597 */ 598 if (fast_rtx && !chunk->fast_retransmit) 599 continue; 600 601 redo: 602 /* Attempt to append this chunk to the packet. */ 603 status = sctp_packet_append_chunk(pkt, chunk); 604 605 switch (status) { 606 case SCTP_XMIT_PMTU_FULL: 607 if (!pkt->has_data && !pkt->has_cookie_echo) { 608 /* If this packet did not contain DATA then 609 * retransmission did not happen, so do it 610 * again. We'll ignore the error here since 611 * control chunks are already freed so there 612 * is nothing we can do. 613 */ 614 sctp_packet_transmit(pkt); 615 goto redo; 616 } 617 618 /* Send this packet. */ 619 error = sctp_packet_transmit(pkt); 620 621 /* If we are retransmitting, we should only 622 * send a single packet. 623 */ 624 if (rtx_timeout || fast_rtx) 625 done = 1; 626 627 /* Bundle next chunk in the next round. */ 628 break; 629 630 case SCTP_XMIT_RWND_FULL: 631 /* Send this packet. */ 632 error = sctp_packet_transmit(pkt); 633 634 /* Stop sending DATA as there is no more room 635 * at the receiver. 636 */ 637 done = 1; 638 break; 639 640 case SCTP_XMIT_NAGLE_DELAY: 641 /* Send this packet. */ 642 error = sctp_packet_transmit(pkt); 643 644 /* Stop sending DATA because of nagle delay. */ 645 done = 1; 646 break; 647 648 default: 649 /* The append was successful, so add this chunk to 650 * the transmitted list. 651 */ 652 list_del(&chunk->transmitted_list); 653 list_add_tail(&chunk->transmitted_list, 654 &transport->transmitted); 655 656 /* Mark the chunk as ineligible for fast retransmit 657 * after it is retransmitted. 658 */ 659 if (chunk->fast_retransmit == SCTP_NEED_FRTX) 660 chunk->fast_retransmit = SCTP_DONT_FRTX; 661 662 q->empty = 0; 663 break; 664 } 665 666 /* Set the timer if there were no errors */ 667 if (!error && !timer) 668 timer = 1; 669 670 if (done) 671 break; 672 } 673 674 /* If we are here due to a retransmit timeout or a fast 675 * retransmit and if there are any chunks left in the retransmit 676 * queue that could not fit in the PMTU sized packet, they need 677 * to be marked as ineligible for a subsequent fast retransmit. 678 */ 679 if (rtx_timeout || fast_rtx) { 680 list_for_each_entry(chunk1, lqueue, transmitted_list) { 681 if (chunk1->fast_retransmit == SCTP_NEED_FRTX) 682 chunk1->fast_retransmit = SCTP_DONT_FRTX; 683 } 684 } 685 686 *start_timer = timer; 687 688 /* Clear fast retransmit hint */ 689 if (fast_rtx) 690 q->fast_rtx = 0; 691 692 return error; 693 } 694 695 /* Cork the outqueue so queued chunks are really queued. */ 696 int sctp_outq_uncork(struct sctp_outq *q) 697 { 698 int error = 0; 699 if (q->cork) 700 q->cork = 0; 701 error = sctp_outq_flush(q, 0); 702 return error; 703 } 704 705 706 /* 707 * Try to flush an outqueue. 708 * 709 * Description: Send everything in q which we legally can, subject to 710 * congestion limitations. 711 * * Note: This function can be called from multiple contexts so appropriate 712 * locking concerns must be made. Today we use the sock lock to protect 713 * this function. 714 */ 715 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) 716 { 717 struct sctp_packet *packet; 718 struct sctp_packet singleton; 719 struct sctp_association *asoc = q->asoc; 720 __u16 sport = asoc->base.bind_addr.port; 721 __u16 dport = asoc->peer.port; 722 __u32 vtag = asoc->peer.i.init_tag; 723 struct sctp_transport *transport = NULL; 724 struct sctp_transport *new_transport; 725 struct sctp_chunk *chunk, *tmp; 726 sctp_xmit_t status; 727 int error = 0; 728 int start_timer = 0; 729 int one_packet = 0; 730 731 /* These transports have chunks to send. */ 732 struct list_head transport_list; 733 struct list_head *ltransport; 734 735 INIT_LIST_HEAD(&transport_list); 736 packet = NULL; 737 738 /* 739 * 6.10 Bundling 740 * ... 741 * When bundling control chunks with DATA chunks, an 742 * endpoint MUST place control chunks first in the outbound 743 * SCTP packet. The transmitter MUST transmit DATA chunks 744 * within a SCTP packet in increasing order of TSN. 745 * ... 746 */ 747 748 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 749 list_del_init(&chunk->list); 750 751 /* Pick the right transport to use. */ 752 new_transport = chunk->transport; 753 754 if (!new_transport) { 755 /* 756 * If we have a prior transport pointer, see if 757 * the destination address of the chunk 758 * matches the destination address of the 759 * current transport. If not a match, then 760 * try to look up the transport with a given 761 * destination address. We do this because 762 * after processing ASCONFs, we may have new 763 * transports created. 764 */ 765 if (transport && 766 sctp_cmp_addr_exact(&chunk->dest, 767 &transport->ipaddr)) 768 new_transport = transport; 769 else 770 new_transport = sctp_assoc_lookup_paddr(asoc, 771 &chunk->dest); 772 773 /* if we still don't have a new transport, then 774 * use the current active path. 775 */ 776 if (!new_transport) 777 new_transport = asoc->peer.active_path; 778 } else if ((new_transport->state == SCTP_INACTIVE) || 779 (new_transport->state == SCTP_UNCONFIRMED)) { 780 /* If the chunk is Heartbeat or Heartbeat Ack, 781 * send it to chunk->transport, even if it's 782 * inactive. 783 * 784 * 3.3.6 Heartbeat Acknowledgement: 785 * ... 786 * A HEARTBEAT ACK is always sent to the source IP 787 * address of the IP datagram containing the 788 * HEARTBEAT chunk to which this ack is responding. 789 * ... 790 * 791 * ASCONF_ACKs also must be sent to the source. 792 */ 793 if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && 794 chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && 795 chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) 796 new_transport = asoc->peer.active_path; 797 } 798 799 /* Are we switching transports? 800 * Take care of transport locks. 801 */ 802 if (new_transport != transport) { 803 transport = new_transport; 804 if (list_empty(&transport->send_ready)) { 805 list_add_tail(&transport->send_ready, 806 &transport_list); 807 } 808 packet = &transport->packet; 809 sctp_packet_config(packet, vtag, 810 asoc->peer.ecn_capable); 811 } 812 813 switch (chunk->chunk_hdr->type) { 814 /* 815 * 6.10 Bundling 816 * ... 817 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN 818 * COMPLETE with any other chunks. [Send them immediately.] 819 */ 820 case SCTP_CID_INIT: 821 case SCTP_CID_INIT_ACK: 822 case SCTP_CID_SHUTDOWN_COMPLETE: 823 sctp_packet_init(&singleton, transport, sport, dport); 824 sctp_packet_config(&singleton, vtag, 0); 825 sctp_packet_append_chunk(&singleton, chunk); 826 error = sctp_packet_transmit(&singleton); 827 if (error < 0) 828 return error; 829 break; 830 831 case SCTP_CID_ABORT: 832 if (sctp_test_T_bit(chunk)) { 833 packet->vtag = asoc->c.my_vtag; 834 } 835 /* The following chunks are "response" chunks, i.e. 836 * they are generated in response to something we 837 * received. If we are sending these, then we can 838 * send only 1 packet containing these chunks. 839 */ 840 case SCTP_CID_HEARTBEAT_ACK: 841 case SCTP_CID_SHUTDOWN_ACK: 842 case SCTP_CID_COOKIE_ACK: 843 case SCTP_CID_COOKIE_ECHO: 844 case SCTP_CID_ERROR: 845 case SCTP_CID_ECN_CWR: 846 case SCTP_CID_ASCONF_ACK: 847 one_packet = 1; 848 /* Fall throught */ 849 850 case SCTP_CID_SACK: 851 case SCTP_CID_HEARTBEAT: 852 case SCTP_CID_SHUTDOWN: 853 case SCTP_CID_ECN_ECNE: 854 case SCTP_CID_ASCONF: 855 case SCTP_CID_FWD_TSN: 856 status = sctp_packet_transmit_chunk(packet, chunk, 857 one_packet); 858 if (status != SCTP_XMIT_OK) { 859 /* put the chunk back */ 860 list_add(&chunk->list, &q->control_chunk_list); 861 } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { 862 /* PR-SCTP C5) If a FORWARD TSN is sent, the 863 * sender MUST assure that at least one T3-rtx 864 * timer is running. 865 */ 866 sctp_transport_reset_timers(transport); 867 } 868 break; 869 870 default: 871 /* We built a chunk with an illegal type! */ 872 BUG(); 873 } 874 } 875 876 /* Is it OK to send data chunks? */ 877 switch (asoc->state) { 878 case SCTP_STATE_COOKIE_ECHOED: 879 /* Only allow bundling when this packet has a COOKIE-ECHO 880 * chunk. 881 */ 882 if (!packet || !packet->has_cookie_echo) 883 break; 884 885 /* fallthru */ 886 case SCTP_STATE_ESTABLISHED: 887 case SCTP_STATE_SHUTDOWN_PENDING: 888 case SCTP_STATE_SHUTDOWN_RECEIVED: 889 /* 890 * RFC 2960 6.1 Transmission of DATA Chunks 891 * 892 * C) When the time comes for the sender to transmit, 893 * before sending new DATA chunks, the sender MUST 894 * first transmit any outstanding DATA chunks which 895 * are marked for retransmission (limited by the 896 * current cwnd). 897 */ 898 if (!list_empty(&q->retransmit)) { 899 if (transport == asoc->peer.retran_path) 900 goto retran; 901 902 /* Switch transports & prepare the packet. */ 903 904 transport = asoc->peer.retran_path; 905 906 if (list_empty(&transport->send_ready)) { 907 list_add_tail(&transport->send_ready, 908 &transport_list); 909 } 910 911 packet = &transport->packet; 912 sctp_packet_config(packet, vtag, 913 asoc->peer.ecn_capable); 914 retran: 915 error = sctp_outq_flush_rtx(q, packet, 916 rtx_timeout, &start_timer); 917 918 if (start_timer) 919 sctp_transport_reset_timers(transport); 920 921 /* This can happen on COOKIE-ECHO resend. Only 922 * one chunk can get bundled with a COOKIE-ECHO. 923 */ 924 if (packet->has_cookie_echo) 925 goto sctp_flush_out; 926 927 /* Don't send new data if there is still data 928 * waiting to retransmit. 929 */ 930 if (!list_empty(&q->retransmit)) 931 goto sctp_flush_out; 932 } 933 934 /* Apply Max.Burst limitation to the current transport in 935 * case it will be used for new data. We are going to 936 * rest it before we return, but we want to apply the limit 937 * to the currently queued data. 938 */ 939 if (transport) 940 sctp_transport_burst_limited(transport); 941 942 /* Finally, transmit new packets. */ 943 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 944 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 945 * stream identifier. 946 */ 947 if (chunk->sinfo.sinfo_stream >= 948 asoc->c.sinit_num_ostreams) { 949 950 /* Mark as failed send. */ 951 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 952 sctp_chunk_free(chunk); 953 continue; 954 } 955 956 /* Has this chunk expired? */ 957 if (sctp_chunk_abandoned(chunk)) { 958 sctp_chunk_fail(chunk, 0); 959 sctp_chunk_free(chunk); 960 continue; 961 } 962 963 /* If there is a specified transport, use it. 964 * Otherwise, we want to use the active path. 965 */ 966 new_transport = chunk->transport; 967 if (!new_transport || 968 ((new_transport->state == SCTP_INACTIVE) || 969 (new_transport->state == SCTP_UNCONFIRMED))) 970 new_transport = asoc->peer.active_path; 971 972 /* Change packets if necessary. */ 973 if (new_transport != transport) { 974 transport = new_transport; 975 976 /* Schedule to have this transport's 977 * packet flushed. 978 */ 979 if (list_empty(&transport->send_ready)) { 980 list_add_tail(&transport->send_ready, 981 &transport_list); 982 } 983 984 packet = &transport->packet; 985 sctp_packet_config(packet, vtag, 986 asoc->peer.ecn_capable); 987 /* We've switched transports, so apply the 988 * Burst limit to the new transport. 989 */ 990 sctp_transport_burst_limited(transport); 991 } 992 993 SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", 994 q, chunk, 995 chunk && chunk->chunk_hdr ? 996 sctp_cname(SCTP_ST_CHUNK( 997 chunk->chunk_hdr->type)) 998 : "Illegal Chunk"); 999 1000 SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " 1001 "%p skb->users %d.\n", 1002 ntohl(chunk->subh.data_hdr->tsn), 1003 chunk->skb ?chunk->skb->head : NULL, 1004 chunk->skb ? 1005 atomic_read(&chunk->skb->users) : -1); 1006 1007 /* Add the chunk to the packet. */ 1008 status = sctp_packet_transmit_chunk(packet, chunk, 0); 1009 1010 switch (status) { 1011 case SCTP_XMIT_PMTU_FULL: 1012 case SCTP_XMIT_RWND_FULL: 1013 case SCTP_XMIT_NAGLE_DELAY: 1014 /* We could not append this chunk, so put 1015 * the chunk back on the output queue. 1016 */ 1017 SCTP_DEBUG_PRINTK("sctp_outq_flush: could " 1018 "not transmit TSN: 0x%x, status: %d\n", 1019 ntohl(chunk->subh.data_hdr->tsn), 1020 status); 1021 sctp_outq_head_data(q, chunk); 1022 goto sctp_flush_out; 1023 break; 1024 1025 case SCTP_XMIT_OK: 1026 /* The sender is in the SHUTDOWN-PENDING state, 1027 * The sender MAY set the I-bit in the DATA 1028 * chunk header. 1029 */ 1030 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) 1031 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; 1032 1033 break; 1034 1035 default: 1036 BUG(); 1037 } 1038 1039 /* BUG: We assume that the sctp_packet_transmit() 1040 * call below will succeed all the time and add the 1041 * chunk to the transmitted list and restart the 1042 * timers. 1043 * It is possible that the call can fail under OOM 1044 * conditions. 1045 * 1046 * Is this really a problem? Won't this behave 1047 * like a lost TSN? 1048 */ 1049 list_add_tail(&chunk->transmitted_list, 1050 &transport->transmitted); 1051 1052 sctp_transport_reset_timers(transport); 1053 1054 q->empty = 0; 1055 1056 /* Only let one DATA chunk get bundled with a 1057 * COOKIE-ECHO chunk. 1058 */ 1059 if (packet->has_cookie_echo) 1060 goto sctp_flush_out; 1061 } 1062 break; 1063 1064 default: 1065 /* Do nothing. */ 1066 break; 1067 } 1068 1069 sctp_flush_out: 1070 1071 /* Before returning, examine all the transports touched in 1072 * this call. Right now, we bluntly force clear all the 1073 * transports. Things might change after we implement Nagle. 1074 * But such an examination is still required. 1075 * 1076 * --xguo 1077 */ 1078 while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { 1079 struct sctp_transport *t = list_entry(ltransport, 1080 struct sctp_transport, 1081 send_ready); 1082 packet = &t->packet; 1083 if (!sctp_packet_empty(packet)) 1084 error = sctp_packet_transmit(packet); 1085 1086 /* Clear the burst limited state, if any */ 1087 sctp_transport_burst_reset(t); 1088 } 1089 1090 return error; 1091 } 1092 1093 /* Update unack_data based on the incoming SACK chunk */ 1094 static void sctp_sack_update_unack_data(struct sctp_association *assoc, 1095 struct sctp_sackhdr *sack) 1096 { 1097 sctp_sack_variable_t *frags; 1098 __u16 unack_data; 1099 int i; 1100 1101 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; 1102 1103 frags = sack->variable; 1104 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { 1105 unack_data -= ((ntohs(frags[i].gab.end) - 1106 ntohs(frags[i].gab.start) + 1)); 1107 } 1108 1109 assoc->unack_data = unack_data; 1110 } 1111 1112 /* This is where we REALLY process a SACK. 1113 * 1114 * Process the SACK against the outqueue. Mostly, this just frees 1115 * things off the transmitted queue. 1116 */ 1117 int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) 1118 { 1119 struct sctp_association *asoc = q->asoc; 1120 struct sctp_transport *transport; 1121 struct sctp_chunk *tchunk = NULL; 1122 struct list_head *lchunk, *transport_list, *temp; 1123 sctp_sack_variable_t *frags = sack->variable; 1124 __u32 sack_ctsn, ctsn, tsn; 1125 __u32 highest_tsn, highest_new_tsn; 1126 __u32 sack_a_rwnd; 1127 unsigned outstanding; 1128 struct sctp_transport *primary = asoc->peer.primary_path; 1129 int count_of_newacks = 0; 1130 int gap_ack_blocks; 1131 u8 accum_moved = 0; 1132 1133 /* Grab the association's destination address list. */ 1134 transport_list = &asoc->peer.transport_addr_list; 1135 1136 sack_ctsn = ntohl(sack->cum_tsn_ack); 1137 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); 1138 /* 1139 * SFR-CACC algorithm: 1140 * On receipt of a SACK the sender SHOULD execute the 1141 * following statements. 1142 * 1143 * 1) If the cumulative ack in the SACK passes next tsn_at_change 1144 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be 1145 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for 1146 * all destinations. 1147 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE 1148 * is set the receiver of the SACK MUST take the following actions: 1149 * 1150 * A) Initialize the cacc_saw_newack to 0 for all destination 1151 * addresses. 1152 * 1153 * Only bother if changeover_active is set. Otherwise, this is 1154 * totally suboptimal to do on every SACK. 1155 */ 1156 if (primary->cacc.changeover_active) { 1157 u8 clear_cycling = 0; 1158 1159 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { 1160 primary->cacc.changeover_active = 0; 1161 clear_cycling = 1; 1162 } 1163 1164 if (clear_cycling || gap_ack_blocks) { 1165 list_for_each_entry(transport, transport_list, 1166 transports) { 1167 if (clear_cycling) 1168 transport->cacc.cycling_changeover = 0; 1169 if (gap_ack_blocks) 1170 transport->cacc.cacc_saw_newack = 0; 1171 } 1172 } 1173 } 1174 1175 /* Get the highest TSN in the sack. */ 1176 highest_tsn = sack_ctsn; 1177 if (gap_ack_blocks) 1178 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); 1179 1180 if (TSN_lt(asoc->highest_sacked, highest_tsn)) 1181 asoc->highest_sacked = highest_tsn; 1182 1183 highest_new_tsn = sack_ctsn; 1184 1185 /* Run through the retransmit queue. Credit bytes received 1186 * and free those chunks that we can. 1187 */ 1188 sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn); 1189 1190 /* Run through the transmitted queue. 1191 * Credit bytes received and free those chunks which we can. 1192 * 1193 * This is a MASSIVE candidate for optimization. 1194 */ 1195 list_for_each_entry(transport, transport_list, transports) { 1196 sctp_check_transmitted(q, &transport->transmitted, 1197 transport, sack, &highest_new_tsn); 1198 /* 1199 * SFR-CACC algorithm: 1200 * C) Let count_of_newacks be the number of 1201 * destinations for which cacc_saw_newack is set. 1202 */ 1203 if (transport->cacc.cacc_saw_newack) 1204 count_of_newacks ++; 1205 } 1206 1207 /* Move the Cumulative TSN Ack Point if appropriate. */ 1208 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) { 1209 asoc->ctsn_ack_point = sack_ctsn; 1210 accum_moved = 1; 1211 } 1212 1213 if (gap_ack_blocks) { 1214 1215 if (asoc->fast_recovery && accum_moved) 1216 highest_new_tsn = highest_tsn; 1217 1218 list_for_each_entry(transport, transport_list, transports) 1219 sctp_mark_missing(q, &transport->transmitted, transport, 1220 highest_new_tsn, count_of_newacks); 1221 } 1222 1223 /* Update unack_data field in the assoc. */ 1224 sctp_sack_update_unack_data(asoc, sack); 1225 1226 ctsn = asoc->ctsn_ack_point; 1227 1228 /* Throw away stuff rotting on the sack queue. */ 1229 list_for_each_safe(lchunk, temp, &q->sacked) { 1230 tchunk = list_entry(lchunk, struct sctp_chunk, 1231 transmitted_list); 1232 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1233 if (TSN_lte(tsn, ctsn)) { 1234 list_del_init(&tchunk->transmitted_list); 1235 sctp_chunk_free(tchunk); 1236 } 1237 } 1238 1239 /* ii) Set rwnd equal to the newly received a_rwnd minus the 1240 * number of bytes still outstanding after processing the 1241 * Cumulative TSN Ack and the Gap Ack Blocks. 1242 */ 1243 1244 sack_a_rwnd = ntohl(sack->a_rwnd); 1245 outstanding = q->outstanding_bytes; 1246 1247 if (outstanding < sack_a_rwnd) 1248 sack_a_rwnd -= outstanding; 1249 else 1250 sack_a_rwnd = 0; 1251 1252 asoc->peer.rwnd = sack_a_rwnd; 1253 1254 sctp_generate_fwdtsn(q, sack_ctsn); 1255 1256 SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", 1257 __func__, sack_ctsn); 1258 SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " 1259 "%p is 0x%x. Adv peer ack point: 0x%x\n", 1260 __func__, asoc, ctsn, asoc->adv_peer_ack_point); 1261 1262 /* See if all chunks are acked. 1263 * Make sure the empty queue handler will get run later. 1264 */ 1265 q->empty = (list_empty(&q->out_chunk_list) && 1266 list_empty(&q->retransmit)); 1267 if (!q->empty) 1268 goto finish; 1269 1270 list_for_each_entry(transport, transport_list, transports) { 1271 q->empty = q->empty && list_empty(&transport->transmitted); 1272 if (!q->empty) 1273 goto finish; 1274 } 1275 1276 SCTP_DEBUG_PRINTK("sack queue is empty.\n"); 1277 finish: 1278 return q->empty; 1279 } 1280 1281 /* Is the outqueue empty? */ 1282 int sctp_outq_is_empty(const struct sctp_outq *q) 1283 { 1284 return q->empty; 1285 } 1286 1287 /******************************************************************** 1288 * 2nd Level Abstractions 1289 ********************************************************************/ 1290 1291 /* Go through a transport's transmitted list or the association's retransmit 1292 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked. 1293 * The retransmit list will not have an associated transport. 1294 * 1295 * I added coherent debug information output. --xguo 1296 * 1297 * Instead of printing 'sacked' or 'kept' for each TSN on the 1298 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. 1299 * KEPT TSN6-TSN7, etc. 1300 */ 1301 static void sctp_check_transmitted(struct sctp_outq *q, 1302 struct list_head *transmitted_queue, 1303 struct sctp_transport *transport, 1304 struct sctp_sackhdr *sack, 1305 __u32 *highest_new_tsn_in_sack) 1306 { 1307 struct list_head *lchunk; 1308 struct sctp_chunk *tchunk; 1309 struct list_head tlist; 1310 __u32 tsn; 1311 __u32 sack_ctsn; 1312 __u32 rtt; 1313 __u8 restart_timer = 0; 1314 int bytes_acked = 0; 1315 int migrate_bytes = 0; 1316 1317 /* These state variables are for coherent debug output. --xguo */ 1318 1319 #if SCTP_DEBUG 1320 __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */ 1321 __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */ 1322 __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */ 1323 __u32 dbg_last_kept_tsn = 0; /* ...and finishes here. */ 1324 1325 /* 0 : The last TSN was ACKed. 1326 * 1 : The last TSN was NOT ACKed (i.e. KEPT). 1327 * -1: We need to initialize. 1328 */ 1329 int dbg_prt_state = -1; 1330 #endif /* SCTP_DEBUG */ 1331 1332 sack_ctsn = ntohl(sack->cum_tsn_ack); 1333 1334 INIT_LIST_HEAD(&tlist); 1335 1336 /* The while loop will skip empty transmitted queues. */ 1337 while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { 1338 tchunk = list_entry(lchunk, struct sctp_chunk, 1339 transmitted_list); 1340 1341 if (sctp_chunk_abandoned(tchunk)) { 1342 /* Move the chunk to abandoned list. */ 1343 sctp_insert_list(&q->abandoned, lchunk); 1344 1345 /* If this chunk has not been acked, stop 1346 * considering it as 'outstanding'. 1347 */ 1348 if (!tchunk->tsn_gap_acked) { 1349 if (tchunk->transport) 1350 tchunk->transport->flight_size -= 1351 sctp_data_size(tchunk); 1352 q->outstanding_bytes -= sctp_data_size(tchunk); 1353 } 1354 continue; 1355 } 1356 1357 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1358 if (sctp_acked(sack, tsn)) { 1359 /* If this queue is the retransmit queue, the 1360 * retransmit timer has already reclaimed 1361 * the outstanding bytes for this chunk, so only 1362 * count bytes associated with a transport. 1363 */ 1364 if (transport) { 1365 /* If this chunk is being used for RTT 1366 * measurement, calculate the RTT and update 1367 * the RTO using this value. 1368 * 1369 * 6.3.1 C5) Karn's algorithm: RTT measurements 1370 * MUST NOT be made using packets that were 1371 * retransmitted (and thus for which it is 1372 * ambiguous whether the reply was for the 1373 * first instance of the packet or a later 1374 * instance). 1375 */ 1376 if (!tchunk->tsn_gap_acked && 1377 tchunk->rtt_in_progress) { 1378 tchunk->rtt_in_progress = 0; 1379 rtt = jiffies - tchunk->sent_at; 1380 sctp_transport_update_rto(transport, 1381 rtt); 1382 } 1383 } 1384 1385 /* If the chunk hasn't been marked as ACKED, 1386 * mark it and account bytes_acked if the 1387 * chunk had a valid transport (it will not 1388 * have a transport if ASCONF had deleted it 1389 * while DATA was outstanding). 1390 */ 1391 if (!tchunk->tsn_gap_acked) { 1392 tchunk->tsn_gap_acked = 1; 1393 *highest_new_tsn_in_sack = tsn; 1394 bytes_acked += sctp_data_size(tchunk); 1395 if (!tchunk->transport) 1396 migrate_bytes += sctp_data_size(tchunk); 1397 } 1398 1399 if (TSN_lte(tsn, sack_ctsn)) { 1400 /* RFC 2960 6.3.2 Retransmission Timer Rules 1401 * 1402 * R3) Whenever a SACK is received 1403 * that acknowledges the DATA chunk 1404 * with the earliest outstanding TSN 1405 * for that address, restart T3-rtx 1406 * timer for that address with its 1407 * current RTO. 1408 */ 1409 restart_timer = 1; 1410 1411 if (!tchunk->tsn_gap_acked) { 1412 /* 1413 * SFR-CACC algorithm: 1414 * 2) If the SACK contains gap acks 1415 * and the flag CHANGEOVER_ACTIVE is 1416 * set the receiver of the SACK MUST 1417 * take the following action: 1418 * 1419 * B) For each TSN t being acked that 1420 * has not been acked in any SACK so 1421 * far, set cacc_saw_newack to 1 for 1422 * the destination that the TSN was 1423 * sent to. 1424 */ 1425 if (transport && 1426 sack->num_gap_ack_blocks && 1427 q->asoc->peer.primary_path->cacc. 1428 changeover_active) 1429 transport->cacc.cacc_saw_newack 1430 = 1; 1431 } 1432 1433 list_add_tail(&tchunk->transmitted_list, 1434 &q->sacked); 1435 } else { 1436 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 1437 * M2) Each time a SACK arrives reporting 1438 * 'Stray DATA chunk(s)' record the highest TSN 1439 * reported as newly acknowledged, call this 1440 * value 'HighestTSNinSack'. A newly 1441 * acknowledged DATA chunk is one not 1442 * previously acknowledged in a SACK. 1443 * 1444 * When the SCTP sender of data receives a SACK 1445 * chunk that acknowledges, for the first time, 1446 * the receipt of a DATA chunk, all the still 1447 * unacknowledged DATA chunks whose TSN is 1448 * older than that newly acknowledged DATA 1449 * chunk, are qualified as 'Stray DATA chunks'. 1450 */ 1451 list_add_tail(lchunk, &tlist); 1452 } 1453 1454 #if SCTP_DEBUG 1455 switch (dbg_prt_state) { 1456 case 0: /* last TSN was ACKed */ 1457 if (dbg_last_ack_tsn + 1 == tsn) { 1458 /* This TSN belongs to the 1459 * current ACK range. 1460 */ 1461 break; 1462 } 1463 1464 if (dbg_last_ack_tsn != dbg_ack_tsn) { 1465 /* Display the end of the 1466 * current range. 1467 */ 1468 SCTP_DEBUG_PRINTK_CONT("-%08x", 1469 dbg_last_ack_tsn); 1470 } 1471 1472 /* Start a new range. */ 1473 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); 1474 dbg_ack_tsn = tsn; 1475 break; 1476 1477 case 1: /* The last TSN was NOT ACKed. */ 1478 if (dbg_last_kept_tsn != dbg_kept_tsn) { 1479 /* Display the end of current range. */ 1480 SCTP_DEBUG_PRINTK_CONT("-%08x", 1481 dbg_last_kept_tsn); 1482 } 1483 1484 SCTP_DEBUG_PRINTK_CONT("\n"); 1485 1486 /* FALL THROUGH... */ 1487 default: 1488 /* This is the first-ever TSN we examined. */ 1489 /* Start a new range of ACK-ed TSNs. */ 1490 SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); 1491 dbg_prt_state = 0; 1492 dbg_ack_tsn = tsn; 1493 } 1494 1495 dbg_last_ack_tsn = tsn; 1496 #endif /* SCTP_DEBUG */ 1497 1498 } else { 1499 if (tchunk->tsn_gap_acked) { 1500 SCTP_DEBUG_PRINTK("%s: Receiver reneged on " 1501 "data TSN: 0x%x\n", 1502 __func__, 1503 tsn); 1504 tchunk->tsn_gap_acked = 0; 1505 1506 if (tchunk->transport) 1507 bytes_acked -= sctp_data_size(tchunk); 1508 1509 /* RFC 2960 6.3.2 Retransmission Timer Rules 1510 * 1511 * R4) Whenever a SACK is received missing a 1512 * TSN that was previously acknowledged via a 1513 * Gap Ack Block, start T3-rtx for the 1514 * destination address to which the DATA 1515 * chunk was originally 1516 * transmitted if it is not already running. 1517 */ 1518 restart_timer = 1; 1519 } 1520 1521 list_add_tail(lchunk, &tlist); 1522 1523 #if SCTP_DEBUG 1524 /* See the above comments on ACK-ed TSNs. */ 1525 switch (dbg_prt_state) { 1526 case 1: 1527 if (dbg_last_kept_tsn + 1 == tsn) 1528 break; 1529 1530 if (dbg_last_kept_tsn != dbg_kept_tsn) 1531 SCTP_DEBUG_PRINTK_CONT("-%08x", 1532 dbg_last_kept_tsn); 1533 1534 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); 1535 dbg_kept_tsn = tsn; 1536 break; 1537 1538 case 0: 1539 if (dbg_last_ack_tsn != dbg_ack_tsn) 1540 SCTP_DEBUG_PRINTK_CONT("-%08x", 1541 dbg_last_ack_tsn); 1542 SCTP_DEBUG_PRINTK_CONT("\n"); 1543 1544 /* FALL THROUGH... */ 1545 default: 1546 SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); 1547 dbg_prt_state = 1; 1548 dbg_kept_tsn = tsn; 1549 } 1550 1551 dbg_last_kept_tsn = tsn; 1552 #endif /* SCTP_DEBUG */ 1553 } 1554 } 1555 1556 #if SCTP_DEBUG 1557 /* Finish off the last range, displaying its ending TSN. */ 1558 switch (dbg_prt_state) { 1559 case 0: 1560 if (dbg_last_ack_tsn != dbg_ack_tsn) { 1561 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn); 1562 } else { 1563 SCTP_DEBUG_PRINTK_CONT("\n"); 1564 } 1565 break; 1566 1567 case 1: 1568 if (dbg_last_kept_tsn != dbg_kept_tsn) { 1569 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn); 1570 } else { 1571 SCTP_DEBUG_PRINTK_CONT("\n"); 1572 } 1573 } 1574 #endif /* SCTP_DEBUG */ 1575 if (transport) { 1576 if (bytes_acked) { 1577 /* We may have counted DATA that was migrated 1578 * to this transport due to DEL-IP operation. 1579 * Subtract those bytes, since the were never 1580 * send on this transport and shouldn't be 1581 * credited to this transport. 1582 */ 1583 bytes_acked -= migrate_bytes; 1584 1585 /* 8.2. When an outstanding TSN is acknowledged, 1586 * the endpoint shall clear the error counter of 1587 * the destination transport address to which the 1588 * DATA chunk was last sent. 1589 * The association's overall error counter is 1590 * also cleared. 1591 */ 1592 transport->error_count = 0; 1593 transport->asoc->overall_error_count = 0; 1594 1595 /* Mark the destination transport address as 1596 * active if it is not so marked. 1597 */ 1598 if ((transport->state == SCTP_INACTIVE) || 1599 (transport->state == SCTP_UNCONFIRMED)) { 1600 sctp_assoc_control_transport( 1601 transport->asoc, 1602 transport, 1603 SCTP_TRANSPORT_UP, 1604 SCTP_RECEIVED_SACK); 1605 } 1606 1607 sctp_transport_raise_cwnd(transport, sack_ctsn, 1608 bytes_acked); 1609 1610 transport->flight_size -= bytes_acked; 1611 if (transport->flight_size == 0) 1612 transport->partial_bytes_acked = 0; 1613 q->outstanding_bytes -= bytes_acked + migrate_bytes; 1614 } else { 1615 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1616 * When a sender is doing zero window probing, it 1617 * should not timeout the association if it continues 1618 * to receive new packets from the receiver. The 1619 * reason is that the receiver MAY keep its window 1620 * closed for an indefinite time. 1621 * A sender is doing zero window probing when the 1622 * receiver's advertised window is zero, and there is 1623 * only one data chunk in flight to the receiver. 1624 */ 1625 if (!q->asoc->peer.rwnd && 1626 !list_empty(&tlist) && 1627 (sack_ctsn+2 == q->asoc->next_tsn)) { 1628 SCTP_DEBUG_PRINTK("%s: SACK received for zero " 1629 "window probe: %u\n", 1630 __func__, sack_ctsn); 1631 q->asoc->overall_error_count = 0; 1632 transport->error_count = 0; 1633 } 1634 } 1635 1636 /* RFC 2960 6.3.2 Retransmission Timer Rules 1637 * 1638 * R2) Whenever all outstanding data sent to an address have 1639 * been acknowledged, turn off the T3-rtx timer of that 1640 * address. 1641 */ 1642 if (!transport->flight_size) { 1643 if (timer_pending(&transport->T3_rtx_timer) && 1644 del_timer(&transport->T3_rtx_timer)) { 1645 sctp_transport_put(transport); 1646 } 1647 } else if (restart_timer) { 1648 if (!mod_timer(&transport->T3_rtx_timer, 1649 jiffies + transport->rto)) 1650 sctp_transport_hold(transport); 1651 } 1652 } 1653 1654 list_splice(&tlist, transmitted_queue); 1655 } 1656 1657 /* Mark chunks as missing and consequently may get retransmitted. */ 1658 static void sctp_mark_missing(struct sctp_outq *q, 1659 struct list_head *transmitted_queue, 1660 struct sctp_transport *transport, 1661 __u32 highest_new_tsn_in_sack, 1662 int count_of_newacks) 1663 { 1664 struct sctp_chunk *chunk; 1665 __u32 tsn; 1666 char do_fast_retransmit = 0; 1667 struct sctp_association *asoc = q->asoc; 1668 struct sctp_transport *primary = asoc->peer.primary_path; 1669 1670 list_for_each_entry(chunk, transmitted_queue, transmitted_list) { 1671 1672 tsn = ntohl(chunk->subh.data_hdr->tsn); 1673 1674 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all 1675 * 'Unacknowledged TSN's', if the TSN number of an 1676 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' 1677 * value, increment the 'TSN.Missing.Report' count on that 1678 * chunk if it has NOT been fast retransmitted or marked for 1679 * fast retransmit already. 1680 */ 1681 if (chunk->fast_retransmit == SCTP_CAN_FRTX && 1682 !chunk->tsn_gap_acked && 1683 TSN_lt(tsn, highest_new_tsn_in_sack)) { 1684 1685 /* SFR-CACC may require us to skip marking 1686 * this chunk as missing. 1687 */ 1688 if (!transport || !sctp_cacc_skip(primary, transport, 1689 count_of_newacks, tsn)) { 1690 chunk->tsn_missing_report++; 1691 1692 SCTP_DEBUG_PRINTK( 1693 "%s: TSN 0x%x missing counter: %d\n", 1694 __func__, tsn, 1695 chunk->tsn_missing_report); 1696 } 1697 } 1698 /* 1699 * M4) If any DATA chunk is found to have a 1700 * 'TSN.Missing.Report' 1701 * value larger than or equal to 3, mark that chunk for 1702 * retransmission and start the fast retransmit procedure. 1703 */ 1704 1705 if (chunk->tsn_missing_report >= 3) { 1706 chunk->fast_retransmit = SCTP_NEED_FRTX; 1707 do_fast_retransmit = 1; 1708 } 1709 } 1710 1711 if (transport) { 1712 if (do_fast_retransmit) 1713 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); 1714 1715 SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " 1716 "ssthresh: %d, flight_size: %d, pba: %d\n", 1717 __func__, transport, transport->cwnd, 1718 transport->ssthresh, transport->flight_size, 1719 transport->partial_bytes_acked); 1720 } 1721 } 1722 1723 /* Is the given TSN acked by this packet? */ 1724 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) 1725 { 1726 int i; 1727 sctp_sack_variable_t *frags; 1728 __u16 gap; 1729 __u32 ctsn = ntohl(sack->cum_tsn_ack); 1730 1731 if (TSN_lte(tsn, ctsn)) 1732 goto pass; 1733 1734 /* 3.3.4 Selective Acknowledgement (SACK) (3): 1735 * 1736 * Gap Ack Blocks: 1737 * These fields contain the Gap Ack Blocks. They are repeated 1738 * for each Gap Ack Block up to the number of Gap Ack Blocks 1739 * defined in the Number of Gap Ack Blocks field. All DATA 1740 * chunks with TSNs greater than or equal to (Cumulative TSN 1741 * Ack + Gap Ack Block Start) and less than or equal to 1742 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack 1743 * Block are assumed to have been received correctly. 1744 */ 1745 1746 frags = sack->variable; 1747 gap = tsn - ctsn; 1748 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { 1749 if (TSN_lte(ntohs(frags[i].gab.start), gap) && 1750 TSN_lte(gap, ntohs(frags[i].gab.end))) 1751 goto pass; 1752 } 1753 1754 return 0; 1755 pass: 1756 return 1; 1757 } 1758 1759 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, 1760 int nskips, __be16 stream) 1761 { 1762 int i; 1763 1764 for (i = 0; i < nskips; i++) { 1765 if (skiplist[i].stream == stream) 1766 return i; 1767 } 1768 return i; 1769 } 1770 1771 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ 1772 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) 1773 { 1774 struct sctp_association *asoc = q->asoc; 1775 struct sctp_chunk *ftsn_chunk = NULL; 1776 struct sctp_fwdtsn_skip ftsn_skip_arr[10]; 1777 int nskips = 0; 1778 int skip_pos = 0; 1779 __u32 tsn; 1780 struct sctp_chunk *chunk; 1781 struct list_head *lchunk, *temp; 1782 1783 if (!asoc->peer.prsctp_capable) 1784 return; 1785 1786 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the 1787 * received SACK. 1788 * 1789 * If (Advanced.Peer.Ack.Point < SackCumAck), then update 1790 * Advanced.Peer.Ack.Point to be equal to SackCumAck. 1791 */ 1792 if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) 1793 asoc->adv_peer_ack_point = ctsn; 1794 1795 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" 1796 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as 1797 * the chunk next in the out-queue space is marked as "abandoned" as 1798 * shown in the following example: 1799 * 1800 * Assuming that a SACK arrived with the Cumulative TSN ACK 102 1801 * and the Advanced.Peer.Ack.Point is updated to this value: 1802 * 1803 * out-queue at the end of ==> out-queue after Adv.Ack.Point 1804 * normal SACK processing local advancement 1805 * ... ... 1806 * Adv.Ack.Pt-> 102 acked 102 acked 1807 * 103 abandoned 103 abandoned 1808 * 104 abandoned Adv.Ack.P-> 104 abandoned 1809 * 105 105 1810 * 106 acked 106 acked 1811 * ... ... 1812 * 1813 * In this example, the data sender successfully advanced the 1814 * "Advanced.Peer.Ack.Point" from 102 to 104 locally. 1815 */ 1816 list_for_each_safe(lchunk, temp, &q->abandoned) { 1817 chunk = list_entry(lchunk, struct sctp_chunk, 1818 transmitted_list); 1819 tsn = ntohl(chunk->subh.data_hdr->tsn); 1820 1821 /* Remove any chunks in the abandoned queue that are acked by 1822 * the ctsn. 1823 */ 1824 if (TSN_lte(tsn, ctsn)) { 1825 list_del_init(lchunk); 1826 sctp_chunk_free(chunk); 1827 } else { 1828 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { 1829 asoc->adv_peer_ack_point = tsn; 1830 if (chunk->chunk_hdr->flags & 1831 SCTP_DATA_UNORDERED) 1832 continue; 1833 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], 1834 nskips, 1835 chunk->subh.data_hdr->stream); 1836 ftsn_skip_arr[skip_pos].stream = 1837 chunk->subh.data_hdr->stream; 1838 ftsn_skip_arr[skip_pos].ssn = 1839 chunk->subh.data_hdr->ssn; 1840 if (skip_pos == nskips) 1841 nskips++; 1842 if (nskips == 10) 1843 break; 1844 } else 1845 break; 1846 } 1847 } 1848 1849 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" 1850 * is greater than the Cumulative TSN ACK carried in the received 1851 * SACK, the data sender MUST send the data receiver a FORWARD TSN 1852 * chunk containing the latest value of the 1853 * "Advanced.Peer.Ack.Point". 1854 * 1855 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD 1856 * list each stream and sequence number in the forwarded TSN. This 1857 * information will enable the receiver to easily find any 1858 * stranded TSN's waiting on stream reorder queues. Each stream 1859 * SHOULD only be reported once; this means that if multiple 1860 * abandoned messages occur in the same stream then only the 1861 * highest abandoned stream sequence number is reported. If the 1862 * total size of the FORWARD TSN does NOT fit in a single MTU then 1863 * the sender of the FORWARD TSN SHOULD lower the 1864 * Advanced.Peer.Ack.Point to the last TSN that will fit in a 1865 * single MTU. 1866 */ 1867 if (asoc->adv_peer_ack_point > ctsn) 1868 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, 1869 nskips, &ftsn_skip_arr[0]); 1870 1871 if (ftsn_chunk) { 1872 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1873 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1874 } 1875 } 1876