xref: /openbmc/linux/net/sctp/output.c (revision 26dd3e4f)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  *
6  * This file is part of the SCTP kernel implementation
7  *
8  * These functions handle output processing.
9  *
10  * This SCTP implementation is free software;
11  * you can redistribute it and/or modify it under the terms of
12  * the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This SCTP implementation is distributed in the hope that it
17  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
18  *                 ************************
19  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20  * See the GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with GNU CC; see the file COPYING.  If not, see
24  * <http://www.gnu.org/licenses/>.
25  *
26  * Please send any bug reports or fixes you make to the
27  * email address(es):
28  *    lksctp developers <linux-sctp@vger.kernel.org>
29  *
30  * Written or modified by:
31  *    La Monte H.P. Yarroll <piggy@acm.org>
32  *    Karl Knutson          <karl@athena.chicago.il.us>
33  *    Jon Grimm             <jgrimm@austin.ibm.com>
34  *    Sridhar Samudrala     <sri@us.ibm.com>
35  */
36 
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 
39 #include <linux/types.h>
40 #include <linux/kernel.h>
41 #include <linux/wait.h>
42 #include <linux/time.h>
43 #include <linux/ip.h>
44 #include <linux/ipv6.h>
45 #include <linux/init.h>
46 #include <linux/slab.h>
47 #include <net/inet_ecn.h>
48 #include <net/ip.h>
49 #include <net/icmp.h>
50 #include <net/net_namespace.h>
51 
52 #include <linux/socket.h> /* for sa_family_t */
53 #include <net/sock.h>
54 
55 #include <net/sctp/sctp.h>
56 #include <net/sctp/sm.h>
57 #include <net/sctp/checksum.h>
58 
59 /* Forward declarations for private helpers. */
60 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
61 					      struct sctp_chunk *chunk);
62 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
63 					   struct sctp_chunk *chunk);
64 static void sctp_packet_append_data(struct sctp_packet *packet,
65 					   struct sctp_chunk *chunk);
66 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
67 					struct sctp_chunk *chunk,
68 					u16 chunk_len);
69 
70 static void sctp_packet_reset(struct sctp_packet *packet)
71 {
72 	packet->size = packet->overhead;
73 	packet->has_cookie_echo = 0;
74 	packet->has_sack = 0;
75 	packet->has_data = 0;
76 	packet->has_auth = 0;
77 	packet->ipfragok = 0;
78 	packet->auth = NULL;
79 }
80 
81 /* Config a packet.
82  * This is a follow-up set of initializations.
83  */
84 struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
85 				       __u32 vtag, int ecn_capable)
86 {
87 	struct sctp_transport *tp = packet->transport;
88 	struct sctp_association *asoc = tp->asoc;
89 
90 	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
91 
92 	packet->vtag = vtag;
93 
94 	if (asoc && tp->dst) {
95 		struct sock *sk = asoc->base.sk;
96 
97 		rcu_read_lock();
98 		if (__sk_dst_get(sk) != tp->dst) {
99 			dst_hold(tp->dst);
100 			sk_setup_caps(sk, tp->dst);
101 		}
102 
103 		if (sk_can_gso(sk)) {
104 			struct net_device *dev = tp->dst->dev;
105 
106 			packet->max_size = dev->gso_max_size;
107 		} else {
108 			packet->max_size = asoc->pathmtu;
109 		}
110 		rcu_read_unlock();
111 
112 	} else {
113 		packet->max_size = tp->pathmtu;
114 	}
115 
116 	if (ecn_capable && sctp_packet_empty(packet)) {
117 		struct sctp_chunk *chunk;
118 
119 		/* If there is a prepend chunk, stick it on the list before
120 		 * any other chunks get appended.
121 		 */
122 		chunk = sctp_get_ecne_prepend(asoc);
123 		if (chunk)
124 			sctp_packet_append_chunk(packet, chunk);
125 	}
126 
127 	return packet;
128 }
129 
130 /* Initialize the packet structure. */
131 struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
132 				     struct sctp_transport *transport,
133 				     __u16 sport, __u16 dport)
134 {
135 	struct sctp_association *asoc = transport->asoc;
136 	size_t overhead;
137 
138 	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
139 
140 	packet->transport = transport;
141 	packet->source_port = sport;
142 	packet->destination_port = dport;
143 	INIT_LIST_HEAD(&packet->chunk_list);
144 	if (asoc) {
145 		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
146 		overhead = sp->pf->af->net_header_len;
147 	} else {
148 		overhead = sizeof(struct ipv6hdr);
149 	}
150 	overhead += sizeof(struct sctphdr);
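	/* For illustration: with IPv6 (or no association) the overhead is
	 * sizeof(struct ipv6hdr) + sizeof(struct sctphdr) = 40 + 12 = 52
	 * bytes; an IPv4 association would give 20 + 12 = 32 bytes.
	 */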
151 	packet->overhead = overhead;
152 	sctp_packet_reset(packet);
153 	packet->vtag = 0;
154 
155 	return packet;
156 }
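
/* Illustrative sketch (not the literal caller) of how the outqueue code,
 * e.g. sctp_outq_flush(), typically drives this API:
 *
 *	struct sctp_packet pkt;
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, asoc->peer.i.init_tag, 0);
 *	if (sctp_packet_transmit_chunk(&pkt, chunk, 0, gfp) != SCTP_XMIT_OK)
 *		;	// handle SCTP_XMIT_RWND_FULL / SCTP_XMIT_DELAY
 *	sctp_packet_transmit(&pkt, gfp);
 *	sctp_packet_free(&pkt);
 */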
157 
158 /* Free a packet.  */
159 void sctp_packet_free(struct sctp_packet *packet)
160 {
161 	struct sctp_chunk *chunk, *tmp;
162 
163 	pr_debug("%s: packet:%p\n", __func__, packet);
164 
165 	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
166 		list_del_init(&chunk->list);
167 		sctp_chunk_free(chunk);
168 	}
169 }
170 
171 /* This routine tries to append the chunk to the offered packet. If adding
172  * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
173  * is not present in the packet, it transmits the input packet.
174  * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
175  * as it can fit in the packet, but any more data that does not fit in this
176  * packet can be sent only after receiving the COOKIE_ACK.
177  */
178 sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
179 				       struct sctp_chunk *chunk,
180 				       int one_packet, gfp_t gfp)
181 {
182 	sctp_xmit_t retval;
183 
184 	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
185 		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
186 
187 	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
188 	case SCTP_XMIT_PMTU_FULL:
189 		if (!packet->has_cookie_echo) {
190 			int error = 0;
191 
192 			error = sctp_packet_transmit(packet, gfp);
193 			if (error < 0)
194 				chunk->skb->sk->sk_err = -error;
195 
196 			/* If we have an empty packet, then we can NOT ever
197 			 * return PMTU_FULL.
198 			 */
199 			if (!one_packet)
200 				retval = sctp_packet_append_chunk(packet,
201 								  chunk);
202 		}
203 		break;
204 
205 	case SCTP_XMIT_RWND_FULL:
206 	case SCTP_XMIT_OK:
207 	case SCTP_XMIT_DELAY:
208 		break;
209 	}
210 
211 	return retval;
212 }
213 
214 /* Try to bundle an auth chunk into the packet. */
215 static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
216 					   struct sctp_chunk *chunk)
217 {
218 	struct sctp_association *asoc = pkt->transport->asoc;
219 	struct sctp_chunk *auth;
220 	sctp_xmit_t retval = SCTP_XMIT_OK;
221 
222 	/* if we don't have an association, we can't do authentication */
223 	if (!asoc)
224 		return retval;
225 
226 	/* See if this is an auth chunk we are bundling or if
227 	 * auth is already bundled.
228 	 */
229 	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
230 		return retval;
231 
232 	/* if the peer did not request this chunk to be authenticated,
233 	 * don't do it
234 	 */
235 	if (!chunk->auth)
236 		return retval;
237 
238 	auth = sctp_make_auth(asoc);
239 	if (!auth)
240 		return retval;
241 
242 	retval = __sctp_packet_append_chunk(pkt, auth);
243 
244 	if (retval != SCTP_XMIT_OK)
245 		sctp_chunk_free(auth);
246 
247 	return retval;
248 }
249 
250 /* Try to bundle a SACK with the packet. */
251 static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
252 					   struct sctp_chunk *chunk)
253 {
254 	sctp_xmit_t retval = SCTP_XMIT_OK;
255 
256 	/* If sending DATA and haven't already bundled a SACK, try to
257 	 * bundle one into the packet.
258 	 */
259 	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
260 	    !pkt->has_cookie_echo) {
261 		struct sctp_association *asoc;
262 		struct timer_list *timer;
263 		asoc = pkt->transport->asoc;
264 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
265 
266 		/* If the SACK timer is running, we have a pending SACK */
267 		if (timer_pending(timer)) {
268 			struct sctp_chunk *sack;
269 
270 			if (pkt->transport->sack_generation !=
271 			    pkt->transport->asoc->peer.sack_generation)
272 				return retval;
273 
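			/* Refresh the advertised window so the SACK we
			 * bundle reports our current receive buffer space.
			 */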
274 			asoc->a_rwnd = asoc->rwnd;
275 			sack = sctp_make_sack(asoc);
276 			if (sack) {
277 				retval = __sctp_packet_append_chunk(pkt, sack);
278 				if (retval != SCTP_XMIT_OK) {
279 					sctp_chunk_free(sack);
280 					goto out;
281 				}
282 				asoc->peer.sack_needed = 0;
283 				if (del_timer(timer))
284 					sctp_association_put(asoc);
285 			}
286 		}
287 	}
288 out:
289 	return retval;
290 }
291 
292 
293 /* Append a chunk to the offered packet reporting back any inability to do
294  * so.
295  */
296 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
297 					      struct sctp_chunk *chunk)
298 {
299 	sctp_xmit_t retval = SCTP_XMIT_OK;
300 	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
301 
302 	/* Check to see if this chunk will fit into the packet */
303 	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
304 	if (retval != SCTP_XMIT_OK)
305 		goto finish;
306 
307 	/* We believe that this chunk is OK to add to the packet */
308 	switch (chunk->chunk_hdr->type) {
309 	case SCTP_CID_DATA:
310 		/* Account for the data being in the packet */
311 		sctp_packet_append_data(packet, chunk);
312 		/* Disallow SACK bundling after DATA. */
313 		packet->has_sack = 1;
314 		/* Disallow AUTH bundling after DATA */
315 		packet->has_auth = 1;
316 		/* Let it be known that the packet has DATA in it */
317 		packet->has_data = 1;
318 		/* timestamp the chunk for rtx purposes */
319 		chunk->sent_at = jiffies;
320 		/* Mainly used for prsctp RTX policy */
321 		chunk->sent_count++;
322 		break;
323 	case SCTP_CID_COOKIE_ECHO:
324 		packet->has_cookie_echo = 1;
325 		break;
326 
327 	case SCTP_CID_SACK:
328 		packet->has_sack = 1;
329 		if (chunk->asoc)
330 			chunk->asoc->stats.osacks++;
331 		break;
332 
333 	case SCTP_CID_AUTH:
334 		packet->has_auth = 1;
335 		packet->auth = chunk;
336 		break;
337 	}
338 
339 	/* It is OK to send this chunk.  */
340 	list_add_tail(&chunk->list, &packet->chunk_list);
341 	packet->size += chunk_len;
342 	chunk->transport = packet->transport;
343 finish:
344 	return retval;
345 }
346 
347 /* Append a chunk to the offered packet reporting back any inability to do
348  * so.
349  */
350 sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
351 				     struct sctp_chunk *chunk)
352 {
353 	sctp_xmit_t retval = SCTP_XMIT_OK;
354 
355 	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
356 
357 	/* Data chunks are special.  Before seeing what else we can
358 	 * bundle into this packet, check to see if we are allowed to
359 	 * send this DATA.
360 	 */
361 	if (sctp_chunk_is_data(chunk)) {
362 		retval = sctp_packet_can_append_data(packet, chunk);
363 		if (retval != SCTP_XMIT_OK)
364 			goto finish;
365 	}
366 
367 	/* Try to bundle AUTH chunk */
368 	retval = sctp_packet_bundle_auth(packet, chunk);
369 	if (retval != SCTP_XMIT_OK)
370 		goto finish;
371 
372 	/* Try to bundle SACK chunk */
373 	retval = sctp_packet_bundle_sack(packet, chunk);
374 	if (retval != SCTP_XMIT_OK)
375 		goto finish;
376 
377 	retval = __sctp_packet_append_chunk(packet, chunk);
378 
379 finish:
380 	return retval;
381 }
382 
383 static void sctp_packet_release_owner(struct sk_buff *skb)
384 {
385 	sk_free(skb->sk);
386 }
387 
388 static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
389 {
390 	skb_orphan(skb);
391 	skb->sk = sk;
392 	skb->destructor = sctp_packet_release_owner;
393 
394 	/*
395 	 * The data chunks have already been accounted for in sctp_sendmsg(),
396 	 * therefore only reserve a single byte to keep socket around until
397 	 * the packet has been transmitted.
398 	 */
399 	atomic_inc(&sk->sk_wmem_alloc);
400 }
401 
402 static int sctp_packet_pack(struct sctp_packet *packet,
403 			    struct sk_buff *head, int gso, gfp_t gfp)
404 {
405 	struct sctp_transport *tp = packet->transport;
406 	struct sctp_auth_chunk *auth = NULL;
407 	struct sctp_chunk *chunk, *tmp;
408 	int pkt_count = 0, pkt_size;
409 	struct sock *sk = head->sk;
410 	struct sk_buff *nskb;
411 	int auth_len = 0;
412 
413 	if (gso) {
414 		skb_shinfo(head)->gso_type = sk->sk_gso_type;
415 		NAPI_GRO_CB(head)->last = head;
416 	} else {
417 		nskb = head;
418 		pkt_size = packet->size;
419 		goto merge;
420 	}
421 
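	/* For GSO, each pass of this loop builds one sub-packet (nskb) no
	 * larger than the path MTU and chains it onto head's frag_list via
	 * skb_gro_receive(); without GSO a single pass fills head itself.
	 */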
422 	do {
423 		/* calculate the pkt_size and alloc nskb */
424 		pkt_size = packet->overhead;
425 		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
426 					 list) {
427 			int padded = SCTP_PAD4(chunk->skb->len);
428 
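			/* If this is the AUTH chunk, remember its padded
			 * size; otherwise make sure AUTH plus this chunk
			 * would still fit within the path MTU, or abandon
			 * the transmit.
			 */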
429 			if (chunk == packet->auth)
430 				auth_len = padded;
431 			else if (auth_len + padded + packet->overhead >
432 				 tp->pathmtu)
433 				return 0;
434 			else if (pkt_size + padded > tp->pathmtu)
435 				break;
436 			pkt_size += padded;
437 		}
438 		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
439 		if (!nskb)
440 			return 0;
441 		skb_reserve(nskb, packet->overhead + MAX_HEADER);
442 
443 merge:
444 		/* merge chunks into nskb and append nskb into head list */
445 		pkt_size -= packet->overhead;
446 		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
447 			int padding;
448 
449 			list_del_init(&chunk->list);
450 			if (sctp_chunk_is_data(chunk)) {
451 				if (!sctp_chunk_retransmitted(chunk) &&
452 				    !tp->rto_pending) {
453 					chunk->rtt_in_progress = 1;
454 					tp->rto_pending = 1;
455 				}
456 			}
457 
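			/* Chunks are padded to a 4-byte boundary on the wire,
			 * e.g. a 37-byte chunk gets 3 bytes of zero padding.
			 */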
458 			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
459 			if (padding)
460 				memset(skb_put(chunk->skb, padding), 0, padding);
461 
462 			if (chunk == packet->auth)
463 				auth = (struct sctp_auth_chunk *)
464 							skb_tail_pointer(nskb);
465 
466 			memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data,
467 			       chunk->skb->len);
468 
469 			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
470 				 chunk,
471 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
472 				 chunk->has_tsn ? "TSN" : "No TSN",
473 				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
474 				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
475 				 chunk->rtt_in_progress);
476 
477 			pkt_size -= SCTP_PAD4(chunk->skb->len);
478 
479 			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
480 				sctp_chunk_free(chunk);
481 
482 			if (!pkt_size)
483 				break;
484 		}
485 
486 		if (auth) {
487 			sctp_auth_calculate_hmac(tp->asoc, nskb, auth, gfp);
488 			/* free auth if no more chunks, or add it back */
489 			if (list_empty(&packet->chunk_list))
490 				sctp_chunk_free(packet->auth);
491 			else
492 				list_add(&packet->auth->list,
493 					 &packet->chunk_list);
494 		}
495 
496 		if (gso) {
497 			if (skb_gro_receive(&head, nskb)) {
498 				kfree_skb(nskb);
499 				return 0;
500 			}
501 			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
502 					 sk->sk_gso_max_segs))
503 				return 0;
504 		}
505 
506 		pkt_count++;
507 	} while (!list_empty(&packet->chunk_list));
508 
509 	if (gso) {
510 		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
511 					sizeof(struct inet6_skb_parm)));
512 		skb_shinfo(head)->gso_segs = pkt_count;
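		/* GSO_BY_FRAGS: each skb on the frag_list is already sized
		 * to the path MTU and is emitted as-is by the segmentation
		 * code.
		 */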
513 		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
514 		rcu_read_lock();
515 		if (skb_dst(head) != tp->dst) {
516 			dst_hold(tp->dst);
517 			sk_setup_caps(sk, tp->dst);
518 		}
519 		rcu_read_unlock();
520 		goto chksum;
521 	}
522 
523 	if (sctp_checksum_disable)
524 		return 1;
525 
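	/* Compute the CRC32c in software unless the device can offload it
	 * (NETIF_F_SCTP_CRC) and neither IPsec transforms nor IP
	 * fragmentation get in the way; in the offload case only the
	 * checksum start/offset are set up (CHECKSUM_PARTIAL).
	 */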
526 	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
527 	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
528 		struct sctphdr *sh =
529 			(struct sctphdr *)skb_transport_header(head);
530 
531 		sh->checksum = sctp_compute_cksum(head, 0);
532 	} else {
533 chksum:
534 		head->ip_summed = CHECKSUM_PARTIAL;
535 		head->csum_start = skb_transport_header(head) - head->head;
536 		head->csum_offset = offsetof(struct sctphdr, checksum);
537 	}
538 
539 	return pkt_count;
540 }
541 
542 /* All packets are sent to the network through this function from
543  * sctp_outq_tail().
544  *
545  * The return value is always 0 for now.
546  */
547 int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
548 {
549 	struct sctp_transport *tp = packet->transport;
550 	struct sctp_association *asoc = tp->asoc;
551 	struct sctp_chunk *chunk, *tmp;
552 	int pkt_count, gso = 0;
553 	struct dst_entry *dst;
554 	struct sk_buff *head;
555 	struct sctphdr *sh;
556 	struct sock *sk;
557 
558 	pr_debug("%s: packet:%p\n", __func__, packet);
559 	if (list_empty(&packet->chunk_list))
560 		return 0;
561 	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
562 	sk = chunk->skb->sk;
563 
564 	/* check gso */
565 	if (packet->size > tp->pathmtu && !packet->ipfragok) {
566 		if (!sk_can_gso(sk)) {
567 			pr_err_once("Trying to GSO but underlying device doesn't support it.");
568 			goto out;
569 		}
570 		gso = 1;
571 	}
572 
573 	/* alloc head skb */
574 	head = alloc_skb((gso ? packet->overhead : packet->size) +
575 			 MAX_HEADER, gfp);
576 	if (!head)
577 		goto out;
578 	skb_reserve(head, packet->overhead + MAX_HEADER);
579 	sctp_packet_set_owner_w(head, sk);
580 
581 	/* set sctp header */
582 	sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
583 	skb_reset_transport_header(head);
584 	sh->source = htons(packet->source_port);
585 	sh->dest = htons(packet->destination_port);
586 	sh->vtag = htonl(packet->vtag);
587 	sh->checksum = 0;
588 
589 	/* update the dst if needed */
590 	if (!sctp_transport_dst_check(tp)) {
591 		sctp_transport_route(tp, NULL, sctp_sk(sk));
592 		if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
593 			sctp_assoc_sync_pmtu(sk, asoc);
594 	}
595 	dst = dst_clone(tp->dst);
596 	if (!dst) {
597 		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
598 		kfree_skb(head);
599 		goto out;
600 	}
601 	skb_dst_set(head, dst);
602 
603 	/* pack up chunks */
604 	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
605 	if (!pkt_count) {
606 		kfree_skb(head);
607 		goto out;
608 	}
609 	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
610 
611 	/* start autoclose timer */
612 	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
613 	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
614 		struct timer_list *timer =
615 			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
616 		unsigned long timeout =
617 			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
618 
619 		if (!mod_timer(timer, jiffies + timeout))
620 			sctp_association_hold(asoc);
621 	}
622 
623 	/* sctp xmit */
624 	tp->af_specific->ecn_capable(sk);
625 	if (asoc) {
626 		asoc->stats.opackets += pkt_count;
627 		if (asoc->peer.last_sent_to != tp)
628 			asoc->peer.last_sent_to = tp;
629 	}
630 	head->ignore_df = packet->ipfragok;
631 	tp->af_specific->sctp_xmit(head, tp);
632 
633 out:
634 	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
635 		list_del_init(&chunk->list);
636 		if (!sctp_chunk_is_data(chunk))
637 			sctp_chunk_free(chunk);
638 	}
639 	sctp_packet_reset(packet);
640 	return 0;
641 }
642 
643 /********************************************************************
644  * 2nd Level Abstractions
645  ********************************************************************/
646 
647 /* This private function checks to see if a chunk can be added */
648 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
649 					   struct sctp_chunk *chunk)
650 {
651 	size_t datasize, rwnd, inflight, flight_size;
652 	struct sctp_transport *transport = packet->transport;
653 	struct sctp_association *asoc = transport->asoc;
654 	struct sctp_outq *q = &asoc->outqueue;
655 
656 	/* RFC 2960 6.1  Transmission of DATA Chunks
657 	 *
658 	 * A) At any given time, the data sender MUST NOT transmit new data to
659 	 * any destination transport address if its peer's rwnd indicates
660 	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
661 	 * 6.2.1).  However, regardless of the value of rwnd (including if it
662 	 * is 0), the data sender can always have one DATA chunk in flight to
663 	 * the receiver if allowed by cwnd (see rule B below).  This rule
664 	 * allows the sender to probe for a change in rwnd that the sender
665 	 * missed due to the SACK having been lost in transit from the data
666 	 * receiver to the data sender.
667 	 */
668 
669 	rwnd = asoc->peer.rwnd;
670 	inflight = q->outstanding_bytes;
671 	flight_size = transport->flight_size;
672 
673 	datasize = sctp_data_size(chunk);
674 
675 	if (datasize > rwnd && inflight > 0)
676 		/* We have (at least) one data chunk in flight,
677 		 * so we can't fall back to rule 6.1 B).
678 		 */
679 		return SCTP_XMIT_RWND_FULL;
680 
681 	/* RFC 2960 6.1  Transmission of DATA Chunks
682 	 *
683 	 * B) At any given time, the sender MUST NOT transmit new data
684 	 * to a given transport address if it has cwnd or more bytes
685 	 * of data outstanding to that transport address.
686 	 */
687 	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
688 	 *
689 	 * 3) ...
690 	 *    When a Fast Retransmit is being performed the sender SHOULD
691 	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
692 	 */
693 	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
694 	    flight_size >= transport->cwnd)
695 		return SCTP_XMIT_RWND_FULL;
696 
697 	/* Nagle's algorithm to solve small-packet problem:
698 	 * Inhibit the sending of new chunks when new outgoing data arrives
699 	 * if any previously transmitted data on the connection remains
700 	 * unacknowledged.
701 	 */
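	/* The checks below implement this: send immediately if Nagle is
	 * disabled, if the packet already holds data, if nothing is in
	 * flight, if the association is not yet ESTABLISHED, if enough data
	 * is queued to fill a packet, or if the message cannot be delayed;
	 * otherwise defer.
	 */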
702 
703 	if (sctp_sk(asoc->base.sk)->nodelay)
704 		/* Nagle disabled */
705 		return SCTP_XMIT_OK;
706 
707 	if (!sctp_packet_empty(packet))
708 		/* Append to packet */
709 		return SCTP_XMIT_OK;
710 
711 	if (inflight == 0)
712 		/* Nothing unacked */
713 		return SCTP_XMIT_OK;
714 
715 	if (!sctp_state(asoc, ESTABLISHED))
716 		return SCTP_XMIT_OK;
717 
718 	/* Check whether this chunk and all the rest of the pending data will
719 	 * fit, or else delay in the hope of bundling a full-sized packet.
720 	 */
721 	if (chunk->skb->len + q->out_qlen >
722 		transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
723 		/* Enough data queued to fill a packet */
724 		return SCTP_XMIT_OK;
725 
726 	/* Don't delay large message writes that may have been fragmented */
727 	if (!chunk->msg->can_delay)
728 		return SCTP_XMIT_OK;
729 
730 	/* Defer until all data acked or packet full */
731 	return SCTP_XMIT_DELAY;
732 }
733 
734 /* This private function does management things when adding a DATA chunk */
735 static void sctp_packet_append_data(struct sctp_packet *packet,
736 				struct sctp_chunk *chunk)
737 {
738 	struct sctp_transport *transport = packet->transport;
739 	size_t datasize = sctp_data_size(chunk);
740 	struct sctp_association *asoc = transport->asoc;
741 	u32 rwnd = asoc->peer.rwnd;
742 
743 	/* Keep track of how many bytes are in flight over this transport. */
744 	transport->flight_size += datasize;
745 
746 	/* Keep track of how many bytes are in flight to the receiver. */
747 	asoc->outqueue.outstanding_bytes += datasize;
748 
749 	/* Update our view of the receiver's rwnd. */
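	/* For example: with rwnd 1500 and a 1000-byte chunk our view drops to
	 * 500; a chunk of rwnd bytes or more drops it to 0.
	 */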
750 	if (datasize < rwnd)
751 		rwnd -= datasize;
752 	else
753 		rwnd = 0;
754 
755 	asoc->peer.rwnd = rwnd;
756 	sctp_chunk_assign_tsn(chunk);
757 	sctp_chunk_assign_ssn(chunk);
758 }
759 
760 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
761 					struct sctp_chunk *chunk,
762 					u16 chunk_len)
763 {
764 	size_t psize, pmtu, maxsize;
765 	sctp_xmit_t retval = SCTP_XMIT_OK;
766 
767 	psize = packet->size;
768 	if (packet->transport->asoc)
769 		pmtu = packet->transport->asoc->pathmtu;
770 	else
771 		pmtu = packet->transport->pathmtu;
772 
773 	/* Decide if we need to fragment or resubmit later. */
774 	if (psize + chunk_len > pmtu) {
775 		/* It's OK to fragment at IP level if any one of the following
776 		 * is true:
777 		 *	1. The packet is empty (meaning this chunk is greater
778 		 *	   than the MTU)
779 		 *	2. The packet doesn't have any data in it yet and data
780 		 *	   requires authentication.
781 		 */
782 		if (sctp_packet_empty(packet) ||
783 		    (!packet->has_data && chunk->auth)) {
784 			/* We no longer do re-fragmentation.
785 			 * Just fragment at the IP layer, if we
786 			 * actually hit this condition
787 			 */
788 			packet->ipfragok = 1;
789 			goto out;
790 		}
791 
792 		/* Similarly, if this chunk was built before a PMTU
793 		 * reduction, we have to fragment it at IP level now. So
794 		 * if the packet already contains something, we need to
795 		 * flush.
796 		 */
797 		maxsize = pmtu - packet->overhead;
798 		if (packet->auth)
799 			maxsize -= SCTP_PAD4(packet->auth->skb->len);
800 		if (chunk_len > maxsize)
801 			retval = SCTP_XMIT_PMTU_FULL;
802 
803 		/* It is also okay to fragment if the chunk we are
804 		 * adding is a control chunk, but only if the current packet
805 		 * is not a GSO one, as otherwise it would cause fragmentation
806 		 * of a large frame. So in this case we allow the
807 		 * fragmentation by forcing the chunk into a new packet.
808 		 */
809 		if (!sctp_chunk_is_data(chunk) && packet->has_data)
810 			retval = SCTP_XMIT_PMTU_FULL;
811 
812 		if (psize + chunk_len > packet->max_size)
813 			/* Hit GSO/PMTU limit, gotta flush */
814 			retval = SCTP_XMIT_PMTU_FULL;
815 
816 		if (!packet->transport->burst_limited &&
817 		    psize + chunk_len > (packet->transport->cwnd >> 1))
818 			/* Do not allow a single GSO packet to use more
819 			 * than half of cwnd.
820 			 */
821 			retval = SCTP_XMIT_PMTU_FULL;
822 
823 		if (packet->transport->burst_limited &&
824 		    psize + chunk_len > (packet->transport->burst_limited >> 1))
825 			/* Do not allow a single GSO packet to use more
826 			 * than half of original cwnd.
827 			 */
828 			retval = SCTP_XMIT_PMTU_FULL;
829 		/* Otherwise it will fit in the GSO packet */
830 	}
831 
832 out:
833 	return retval;
834 }
835