/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Configure a packet.
 * This is a follow-up set of initializations, run once the verification
 * tag and ECN capability of the association are known.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);

	packet->vtag = vtag;

	if (asoc && tp->dst) {
		struct sock *sk = asoc->base.sk;

		rcu_read_lock();
		if (__sk_dst_get(sk) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}

		if (sk_can_gso(sk)) {
			struct net_device *dev = tp->dst->dev;

			packet->max_size = dev->gso_max_size;
		} else {
			packet->max_size = asoc->pathmtu;
		}
		rcu_read_unlock();

	} else {
		packet->max_size = tp->pathmtu;
	}

	if (ecn_capable && sctp_packet_empty(packet)) {
		struct sctp_chunk *chunk;

		/* If there is a prepend chunk, stick it on the list before
		 * any other chunks get appended.
		 */
		chunk = sctp_get_ecne_prepend(asoc);
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}
}

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

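/* A worked example of the overhead computation above (illustrative only,
 * not part of the build): for an IPv4 association the network header is
 * 20 bytes and the common SCTP header 12 bytes, so packet->overhead is
 * 32; for IPv6, or when no association exists yet, it is 40 + 12 = 52.
 * A typical caller pairs the two setup routines roughly like this:
 *
 *	struct sctp_packet pkt;
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *
 * where "transport", "sport", "dport", "vtag" and "ecn_capable" stand in
 * for values the real callers in net/sctp/outqueue.c already track.
 */
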
/* Free a packet.  */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet.  If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO
 * chunk is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk,
				       int one_packet, gfp_t gfp)
{
	sctp_xmit_t retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = sctp_packet_append_chunk(packet, chunk))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

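/* A hedged sketch of driving the routine above; the switch mirrors the
 * sctp_xmit_t values this file already uses, while the queueing helpers
 * are hypothetical:
 *
 *	switch (sctp_packet_transmit_chunk(pkt, chunk, 0, GFP_ATOMIC)) {
 *	case SCTP_XMIT_OK:
 *		break;			// chunk is now owned by the packet
 *	case SCTP_XMIT_PMTU_FULL:
 *		requeue(chunk);		// hypothetical: flushed, retry later
 *		break;
 *	case SCTP_XMIT_RWND_FULL:
 *	case SCTP_XMIT_DELAY:
 *		defer(chunk);		// hypothetical: hold back for now
 *		break;
 *	}
 */
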
/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	struct sctp_chunk *auth;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one into the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;

		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}

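/* An illustrative trace of the bundling above (a sketch, not normative):
 * DATA is queued while SCTP_EVENT_TIMEOUT_SACK is still pending, so the
 * owed SACK is piggybacked instead of waiting for the timer:
 *
 *	asoc->a_rwnd = asoc->rwnd;		// advertise current window
 *	sack = sctp_make_sack(asoc);
 *	__sctp_packet_append_chunk(pkt, sack);	// SACK rides with the DATA
 *	if (del_timer(timer))			// timer held an asoc ref
 *		sctp_association_put(asoc);
 *
 * The sack_generation test guards against advertising a stale view of
 * the acknowledgement state on a transport that has not been refreshed.
 */
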
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk.  */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

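/* Worked example of the length math above (illustrative only): a DATA
 * chunk whose chunk_hdr->length is 37 occupies SCTP_PAD4(37) = 40 bytes
 * on the wire, so packet->size grows by 40 and the 3 pad bytes are
 * zeroed later, in sctp_packet_pack().  SCTP_PAD4 is simply:
 *
 *	#define SCTP_PAD4(s) (((s) + 3) & ~3)
 *
 * i.e. round up to the next multiple of 4, as RFC 4960 requires for all
 * chunks.
 */
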
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep the socket around
	 * until the packet has been transmitted.
	 */
	atomic_inc(&sk->sk_wmem_alloc);
}

static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		NAPI_GRO_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				memset(skb_put(chunk->skb, padding), 0, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
							skb_tail_pointer(nskb);

			memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data,
			       chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso) {
			if (skb_gro_receive(&head, nskb)) {
				kfree_skb(nskb);
				return 0;
			}
			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
					 sk->sk_gso_max_segs))
				return 0;
		}

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}

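/* A sketch of the checksum decision at the tail of sctp_packet_pack():
 * the CRC32c is either computed in software or offloaded to the NIC.
 *
 *	if (!(dev->features & NETIF_F_SCTP_CRC) || xfrm || ipfragok)
 *		sh->checksum = sctp_compute_cksum(head, 0);  // software
 *	else
 *		head->ip_summed = CHECKSUM_PARTIAL;          // NIC fills it
 *
 * GSO packets always jump to the CHECKSUM_PARTIAL path, since the stack
 * segments them after this function and each resulting segment needs
 * its own CRC computed over its own payload.
 */
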
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	sctp_packet_set_owner_w(head, sk);

	/* set sctp header */
	sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* update dst if needed */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(sk, asoc);
	}
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}

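/* A hedged usage sketch for sctp_packet_transmit(); the real call sites
 * live in net/sctp/outqueue.c, but the shape is roughly:
 *
 *	error = sctp_packet_transmit(&transport->packet, GFP_ATOMIC);
 *	if (error < 0)
 *		asoc->base.sk->sk_err = -error;
 *
 * The error check is illustrative: as the comment above notes, the
 * function currently always returns 0.
 */
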
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks whether a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !chunk->msg->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of the pending data will
	 * fit, or delay in hopes of bundling a full-sized packet.
	 */
	if (chunk->skb->len + q->out_qlen >
	    transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}

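/* A worked example of the Nagle decision above (illustrative only): with
 * an IPv4 pathmtu of 1500 and overhead of 32, the bundling threshold is
 * 1500 - 32 - sizeof(sctp_data_chunk_t) - 4 = 1448 bytes.  A 100-byte
 * DATA chunk written while earlier data is still unacked, with an empty
 * packet and less than 1448 bytes queued overall, gets SCTP_XMIT_DELAY:
 * it waits in the hope of filling a full-sized packet.  Enabling
 * SCTP_NODELAY (sp->nodelay) short-circuits this at the first test,
 * trading bundling efficiency for latency, much like TCP_NODELAY.
 */
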
/* This private function does the bookkeeping for adding a DATA chunk. */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}

static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len)
{
	size_t psize, pmtu, maxsize;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are adding
		 * is a control chunk, but only if the current packet is
		 * not a GSO one, otherwise it causes fragmentation of a
		 * large frame. So in this case we allow the fragmentation
		 * by forcing the chunk into a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}
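
/* Worked example for sctp_packet_will_fit() (illustrative only): with a
 * pmtu of 1500 and an empty packet (psize == overhead), a 4000-byte
 * chunk exceeds the pmtu, but since the packet is empty, ipfragok is set
 * and the IP layer fragments it.  If instead the packet already holds
 * 1000 bytes and a 600-byte DATA chunk arrives, psize + chunk_len (1600)
 * exceeds both pmtu and packet->max_size (equal to the pmtu when GSO is
 * not in use), so SCTP_XMIT_PMTU_FULL tells the caller to flush first.
 * The cwnd/burst checks additionally cap a single GSO super-packet at
 * half the (possibly burst-limited) congestion window.
 */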
835