/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}
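/* Illustrative sketch only (nothing in this file runs it): the calling
 * sequence this packet API expects, assuming a hypothetical caller that
 * already holds a transport 'tp', a chunk 'chunk' and the association's
 * verification tag 'vtag':
 *
 *	struct sctp_packet pkt;
 *
 *	sctp_packet_init(&pkt, tp, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *	sctp_packet_transmit_chunk(&pkt, chunk, 0, GFP_ATOMIC);
 *	sctp_packet_transmit(&pkt, GFP_ATOMIC);
 *
 * In practice the outqueue flushing code is the caller that drives
 * this sequence.
 */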
/* Config a packet.
 * This is a followup set of initializations beyond sctp_packet_init().
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once per flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
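/* For reference, the wire image this file assembles (RFC 4960, section
 * 3) is one common header followed by one or more chunks, each padded
 * out to a 4-byte boundary:
 *
 *	+-------------------+-------------------+
 *	|    source port    | destination port  |
 *	+-------------------+-------------------+
 *	|            verification tag           |
 *	+---------------------------------------+
 *	|           checksum (CRC32c)           |
 *	+---------------------------------------+
 *	|        chunk 1  ...  chunk N          |
 *	+---------------------------------------+
 *
 * "Bundling" below means appending further chunks to this packet while
 * the total stays within the path MTU (or the GSO limit).
 */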
/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO
 * chunk is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ?
		 chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;

		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
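/* A worked example of the padding arithmetic used below, for reference:
 * SCTP_PAD4(len) rounds 'len' up to the next multiple of 4, so a chunk
 * whose header reports a length of 17 bytes occupies
 * SCTP_PAD4(17) == 20 bytes of packet space; the three pad bytes are
 * zeroed later, at pack time.
 */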
/* Append a chunk to the offered packet, reporting back any inability to
 * do so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet, reporting back any inability to
 * do so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
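/* Bundling-order note: for a DATA chunk the sequence above yields
 * [AUTH][SACK][DATA] within a single packet.  AUTH must precede the
 * chunks it covers, since its HMAC authenticates everything placed
 * after it in the packet (RFC 4895), and piggy-backing a pending SACK
 * here avoids sending a separate SACK-only packet when the delayed
 * SACK timer would otherwise fire.
 */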
static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep the socket around
	 * until the packet has been transmitted.
	 */
	refcount_inc(&sk->sk_wmem_alloc);
}
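/* sctp_packet_pack() below produces either a single skb (the non-GSO
 * case) or a 'head' skb with PMTU-sized sub-packets chained onto it
 * via skb_gro_receive(), with gso_size set to GSO_BY_FRAGS so that
 * segmentation later happens at the boundaries chosen here rather than
 * at a fixed segment size.  As a rough illustration (made-up numbers):
 * with a 1500-byte path MTU and ~4000 bytes of queued chunks, the
 * result is one head skb carrying three sub-packets, each of which the
 * segmentation code later emits as a self-contained IP/SCTP packet.
 */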
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		NAPI_GRO_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso) {
			if (skb_gro_receive(&head, nskb)) {
				kfree_skb(nskb);
				return 0;
			}
			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
					 sk->sk_gso_max_segs))
				return 0;
		}

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
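/* Checksum note: SCTP uses CRC32c (RFC 4960, appendix B) rather than
 * the Internet checksum.  sctp_packet_pack() above either computes it
 * in software with sctp_compute_cksum() or, when the egress device
 * advertises NETIF_F_SCTP_CRC and no xfrm transform sits in the path,
 * leaves the skb in CHECKSUM_PARTIAL state so the driver can offload
 * the CRC.
 */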
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	sctp_packet_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
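/* A worked example of the window checks below, with made-up numbers:
 * with peer rwnd 0 and 1000 bytes already outstanding, a new DATA
 * chunk is refused (SCTP_XMIT_RWND_FULL), because rule 6.1 A) only
 * allows probing a closed window when nothing is in flight.  With
 * rwnd 0 and nothing outstanding, the chunk may still go out as a
 * window probe, provided flight_size is below cwnd.
 */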
/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 A).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of the pending data will
	 * fit, or delay in hopes of bundling a full-sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}

/* This private function does management things when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
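/* Why the shared-key check at the top of sctp_packet_will_fit()
 * matters: a packet carries at most one AUTH chunk, and its HMAC
 * covers every chunk that follows it, so chunks signed with different
 * shared keys can never share a packet.  Returning PMTU_FULL forces a
 * flush so that the next key gets a packet (and an AUTH chunk) of its
 * own.
 */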
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are adding
		 * is a control chunk, but only if the current packet is not
		 * a GSO one, otherwise it causes fragmentation of a large
		 * frame. So in this case we allow the fragmentation by
		 * forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}