xref: /openbmc/linux/net/sctp/output.c (revision 39b6f3aa)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  *
6  * This file is part of the SCTP kernel implementation
7  *
8  * These functions handle output processing.
9  *
10  * This SCTP implementation is free software;
11  * you can redistribute it and/or modify it under the terms of
12  * the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This SCTP implementation is distributed in the hope that it
17  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
18  *                 ************************
19  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20  * See the GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with GNU CC; see the file COPYING.  If not, write to
24  * the Free Software Foundation, 59 Temple Place - Suite 330,
25  * Boston, MA 02111-1307, USA.
26  *
27  * Please send any bug reports or fixes you make to the
28  * email address(es):
29  *    lksctp developers <lksctp-developers@lists.sourceforge.net>
30  *
31  * Or submit a bug report through the following website:
32  *    http://www.sf.net/projects/lksctp
33  *
34  * Written or modified by:
35  *    La Monte H.P. Yarroll <piggy@acm.org>
36  *    Karl Knutson          <karl@athena.chicago.il.us>
37  *    Jon Grimm             <jgrimm@austin.ibm.com>
38  *    Sridhar Samudrala     <sri@us.ibm.com>
39  *
40  * Any bugs reported given to us we will try to fix... any fixes shared will
41  * be incorporated into the next SCTP release.
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/types.h>
47 #include <linux/kernel.h>
48 #include <linux/wait.h>
49 #include <linux/time.h>
50 #include <linux/ip.h>
51 #include <linux/ipv6.h>
52 #include <linux/init.h>
53 #include <linux/slab.h>
54 #include <net/inet_ecn.h>
55 #include <net/ip.h>
56 #include <net/icmp.h>
57 #include <net/net_namespace.h>
58 
59 #include <linux/socket.h> /* for sa_family_t */
60 #include <net/sock.h>
61 
62 #include <net/sctp/sctp.h>
63 #include <net/sctp/sm.h>
64 #include <net/sctp/checksum.h>
65 
66 /* Forward declarations for private helpers. */
67 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
68 					      struct sctp_chunk *chunk);
69 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
70 					   struct sctp_chunk *chunk);
71 static void sctp_packet_append_data(struct sctp_packet *packet,
72 					   struct sctp_chunk *chunk);
73 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
74 					struct sctp_chunk *chunk,
75 					u16 chunk_len);
76 
77 static void sctp_packet_reset(struct sctp_packet *packet)
78 {
79 	packet->size = packet->overhead;
80 	packet->has_cookie_echo = 0;
81 	packet->has_sack = 0;
82 	packet->has_data = 0;
83 	packet->has_auth = 0;
84 	packet->ipfragok = 0;
85 	packet->auth = NULL;
86 }
87 
88 /* Configure a packet.
89  * This is a follow-up set of initializations (vtag, optional ECNE prepend).
90  */
91 struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
92 				       __u32 vtag, int ecn_capable)
93 {
94 	struct sctp_chunk *chunk = NULL;
95 
96 	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
97 			  packet, vtag);
98 
99 	packet->vtag = vtag;
100 
101 	if (ecn_capable && sctp_packet_empty(packet)) {
102 		chunk = sctp_get_ecne_prepend(packet->transport->asoc);
103 
104 		/* If there is a prepend chunk, stick it on the list before
105 		 * any other chunks get appended.
106 		 */
107 		if (chunk)
108 			sctp_packet_append_chunk(packet, chunk);
109 	}
110 
111 	return packet;
112 }
113 
114 /* Initialize the packet structure. */
115 struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
116 				     struct sctp_transport *transport,
117 				     __u16 sport, __u16 dport)
118 {
119 	struct sctp_association *asoc = transport->asoc;
120 	size_t overhead;
121 
122 	SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__,
123 			  packet, transport);
124 
125 	packet->transport = transport;
126 	packet->source_port = sport;
127 	packet->destination_port = dport;
128 	INIT_LIST_HEAD(&packet->chunk_list);
129 	if (asoc) {
130 		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
131 		overhead = sp->pf->af->net_header_len;
132 	} else {
133 		overhead = sizeof(struct ipv6hdr);
134 	}
135 	overhead += sizeof(struct sctphdr);
136 	packet->overhead = overhead;
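	/* For illustration: with IPv4 the overhead is typically
	 * sizeof(struct iphdr) + sizeof(struct sctphdr) = 20 + 12 = 32 bytes
	 * (assuming no IP options); with IPv6 it is 40 + 12 = 52 bytes.
	 * sctp_packet_reset() below starts packet->size at this overhead, so
	 * only chunk bytes are accumulated on top of it.
	 */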
137 	sctp_packet_reset(packet);
138 	packet->vtag = 0;
139 
140 	return packet;
141 }
142 
143 /* Free a packet.  */
144 void sctp_packet_free(struct sctp_packet *packet)
145 {
146 	struct sctp_chunk *chunk, *tmp;
147 
148 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);
149 
150 	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
151 		list_del_init(&chunk->list);
152 		sctp_chunk_free(chunk);
153 	}
154 }
155 
156 /* This routine tries to append the chunk to the offered packet. If adding
157  * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
158  * is not present in the packet, it transmits the input packet.
159  * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
160  * as it can fit in the packet, but any more data that does not fit in this
161  * packet can be sent only after receiving the COOKIE_ACK.
162  */
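/* The result is one of the sctp_xmit_t codes handled in the switch below:
 * SCTP_XMIT_OK, SCTP_XMIT_PMTU_FULL, SCTP_XMIT_RWND_FULL or
 * SCTP_XMIT_NAGLE_DELAY.  Only SCTP_XMIT_PMTU_FULL triggers an immediate
 * transmit of the packet built so far.
 */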
163 sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
164 				       struct sctp_chunk *chunk,
165 				       int one_packet)
166 {
167 	sctp_xmit_t retval;
168 	int error = 0;
169 
170 	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__,
171 			  packet, chunk);
172 
173 	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
174 	case SCTP_XMIT_PMTU_FULL:
175 		if (!packet->has_cookie_echo) {
176 			error = sctp_packet_transmit(packet);
177 			if (error < 0)
178 				chunk->skb->sk->sk_err = -error;
179 
180 			/* If we have an empty packet, then we can NOT ever
181 			 * return PMTU_FULL.
182 			 */
183 			if (!one_packet)
184 				retval = sctp_packet_append_chunk(packet,
185 								  chunk);
186 		}
187 		break;
188 
189 	case SCTP_XMIT_RWND_FULL:
190 	case SCTP_XMIT_OK:
191 	case SCTP_XMIT_NAGLE_DELAY:
192 		break;
193 	}
194 
195 	return retval;
196 }
197 
198 /* Try to bundle an auth chunk into the packet. */
199 static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
200 					   struct sctp_chunk *chunk)
201 {
202 	struct sctp_association *asoc = pkt->transport->asoc;
203 	struct sctp_chunk *auth;
204 	sctp_xmit_t retval = SCTP_XMIT_OK;
205 
206 	/* if we don't have an association, we can't do authentication */
207 	if (!asoc)
208 		return retval;
209 
210 	/* See if this is an auth chunk we are bundling or if
211 	 * auth is already bundled.
212 	 */
213 	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
214 		return retval;
215 
216 	/* if the peer did not request this chunk to be authenticated,
217 	 * don't do it
218 	 */
219 	if (!chunk->auth)
220 		return retval;
221 
222 	auth = sctp_make_auth(asoc);
223 	if (!auth)
224 		return retval;
225 
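	/* Per RFC 4895 an AUTH chunk authenticates the chunks placed after
	 * it, so it is appended here first; the caller then appends the
	 * triggering chunk behind it.
	 */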
226 	retval = __sctp_packet_append_chunk(pkt, auth);
227 
228 	if (retval != SCTP_XMIT_OK)
229 		sctp_chunk_free(auth);
230 
231 	return retval;
232 }
233 
234 /* Try to bundle a SACK with the packet. */
235 static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
236 					   struct sctp_chunk *chunk)
237 {
238 	sctp_xmit_t retval = SCTP_XMIT_OK;
239 
240 	/* If sending DATA and we haven't already bundled a SACK, try to
241 	 * bundle one into the packet.
242 	 */
243 	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
244 	    !pkt->has_cookie_echo) {
245 		struct sctp_association *asoc;
246 		struct timer_list *timer;
247 		asoc = pkt->transport->asoc;
248 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
249 
250 		/* If the SACK timer is running, we have a pending SACK */
251 		if (timer_pending(timer)) {
252 			struct sctp_chunk *sack;
253 
254 			if (pkt->transport->sack_generation !=
255 			    pkt->transport->asoc->peer.sack_generation)
256 				return retval;
257 
258 			asoc->a_rwnd = asoc->rwnd;
259 			sack = sctp_make_sack(asoc);
260 			if (sack) {
261 				retval = __sctp_packet_append_chunk(pkt, sack);
262 				if (retval != SCTP_XMIT_OK) {
263 					sctp_chunk_free(sack);
264 					goto out;
265 				}
266 				asoc->peer.sack_needed = 0;
267 				if (del_timer(timer))
268 					sctp_association_put(asoc);
269 			}
270 		}
271 	}
272 out:
273 	return retval;
274 }
275 
276 
277 /* Append a chunk to the offered packet reporting back any inability to do
278  * so.
279  */
280 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
281 					      struct sctp_chunk *chunk)
282 {
283 	sctp_xmit_t retval = SCTP_XMIT_OK;
284 	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
285 
286 	/* Check to see if this chunk will fit into the packet */
287 	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
288 	if (retval != SCTP_XMIT_OK)
289 		goto finish;
290 
291 	/* We believe that this chunk is OK to add to the packet */
292 	switch (chunk->chunk_hdr->type) {
293 	    case SCTP_CID_DATA:
294 		/* Account for the data being in the packet */
295 		sctp_packet_append_data(packet, chunk);
296 		/* Disallow SACK bundling after DATA. */
297 		packet->has_sack = 1;
298 		/* Disallow AUTH bundling after DATA */
299 		packet->has_auth = 1;
300 		/* Let it be known that the packet has DATA in it */
301 		packet->has_data = 1;
302 		/* timestamp the chunk for rtx purposes */
303 		chunk->sent_at = jiffies;
304 		break;
305 	    case SCTP_CID_COOKIE_ECHO:
306 		packet->has_cookie_echo = 1;
307 		break;
308 
309 	    case SCTP_CID_SACK:
310 		packet->has_sack = 1;
311 		if (chunk->asoc)
312 			chunk->asoc->stats.osacks++;
313 		break;
314 
315 	    case SCTP_CID_AUTH:
316 		packet->has_auth = 1;
317 		packet->auth = chunk;
318 		break;
319 	}
320 
321 	/* It is OK to send this chunk.  */
322 	list_add_tail(&chunk->list, &packet->chunk_list);
323 	packet->size += chunk_len;
324 	chunk->transport = packet->transport;
325 finish:
326 	return retval;
327 }
328 
329 /* Append a chunk to the offered packet reporting back any inability to do
330  * so.
331  */
332 sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
333 				     struct sctp_chunk *chunk)
334 {
335 	sctp_xmit_t retval = SCTP_XMIT_OK;
336 
337 	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
338 			  chunk);
339 
340 	/* Data chunks are special.  Before seeing what else we can
341 	 * bundle into this packet, check to see if we are allowed to
342 	 * send this DATA.
343 	 */
344 	if (sctp_chunk_is_data(chunk)) {
345 		retval = sctp_packet_can_append_data(packet, chunk);
346 		if (retval != SCTP_XMIT_OK)
347 			goto finish;
348 	}
349 
350 	/* Try to bundle AUTH chunk */
351 	retval = sctp_packet_bundle_auth(packet, chunk);
352 	if (retval != SCTP_XMIT_OK)
353 		goto finish;
354 
355 	/* Try to bundle SACK chunk */
356 	retval = sctp_packet_bundle_sack(packet, chunk);
357 	if (retval != SCTP_XMIT_OK)
358 		goto finish;
359 
360 	retval = __sctp_packet_append_chunk(packet, chunk);
361 
362 finish:
363 	return retval;
364 }
365 
366 static void sctp_packet_release_owner(struct sk_buff *skb)
367 {
368 	sk_free(skb->sk);
369 }
370 
371 static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
372 {
373 	skb_orphan(skb);
374 	skb->sk = sk;
375 	skb->destructor = sctp_packet_release_owner;
376 
377 	/*
378 	 * The data chunks have already been accounted for in sctp_sendmsg(),
379 	 * therefore only reserve a single byte to keep the socket around until
380 	 * the packet has been transmitted.
381 	 */
382 	atomic_inc(&sk->sk_wmem_alloc);
383 }
384 
385 /* All packets are sent to the network through this function from
386  * sctp_outq_tail().
387  *
388  * The return value is a normal kernel error return value.
389  */
390 int sctp_packet_transmit(struct sctp_packet *packet)
391 {
392 	struct sctp_transport *tp = packet->transport;
393 	struct sctp_association *asoc = tp->asoc;
394 	struct sctphdr *sh;
395 	struct sk_buff *nskb;
396 	struct sctp_chunk *chunk, *tmp;
397 	struct sock *sk;
398 	int err = 0;
399 	int padding;		/* How much padding do we need?  */
400 	__u8 has_data = 0;
401 	struct dst_entry *dst = tp->dst;
402 	unsigned char *auth = NULL;	/* pointer to auth in skb data */
403 	__u32 cksum_buf_len = sizeof(struct sctphdr);
404 
405 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);
406 
407 	/* Do NOT generate a chunkless packet. */
408 	if (list_empty(&packet->chunk_list))
409 		return err;
410 
411 	/* Set up convenience variables... */
412 	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
413 	sk = chunk->skb->sk;
414 
415 	/* Allocate the new skb.  */
416 	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
417 	if (!nskb)
418 		goto nomem;
419 
420 	/* Make sure the outbound skb has enough header room reserved. */
421 	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
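	/* The reserved headroom covers the link-layer, IP and SCTP headers;
	 * chunk payloads are appended with skb_put() below, and the SCTP
	 * common header is later pulled back into the data area with
	 * skb_push().
	 */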
422 
423 	/* Set the owning socket so that we know where to get the
424 	 * destination IP address.
425 	 */
426 	sctp_packet_set_owner_w(nskb, sk);
427 
428 	if (!sctp_transport_dst_check(tp)) {
429 		sctp_transport_route(tp, NULL, sctp_sk(sk));
430 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
431 			sctp_assoc_sync_pmtu(sk, asoc);
432 		}
433 	}
434 	dst = dst_clone(tp->dst);
435 	skb_dst_set(nskb, dst);
436 	if (!dst)
437 		goto no_route;
438 
439 	/* Build the SCTP header.  */
440 	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
441 	skb_reset_transport_header(nskb);
442 	sh->source = htons(packet->source_port);
443 	sh->dest   = htons(packet->destination_port);
444 
445 	/* From 6.8 Adler-32 Checksum Calculation:
446 	 * After the packet is constructed (containing the SCTP common
447 	 * header and one or more control or DATA chunks), the
448 	 * transmitter shall:
449 	 *
450 	 * 1) Fill in the proper Verification Tag in the SCTP common
451 	 *    header and initialize the checksum field to 0's.
452 	 */
453 	sh->vtag     = htonl(packet->vtag);
454 	sh->checksum = 0;
455 
456 	/**
457 	 * 6.10 Bundling
458 	 *
459 	 *    An endpoint bundles chunks by simply including multiple
460 	 *    chunks in one outbound SCTP packet.  ...
461 	 */
462 
463 	/**
464 	 * 3.2  Chunk Field Descriptions
465 	 *
466 	 * The total length of a chunk (including Type, Length and
467 	 * Value fields) MUST be a multiple of 4 bytes.  If the length
468 	 * of the chunk is not a multiple of 4 bytes, the sender MUST
469 	 * pad the chunk with all zero bytes and this padding is not
470 	 * included in the chunk length field.  The sender should
471 	 * never pad with more than 3 bytes.
472 	 *
473 	 * [This whole comment explains WORD_ROUND() below.]
474 	 */
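	/* For example, a chunk whose length field is 46 occupies
	 * WORD_ROUND(46) = 48 bytes on the wire; the two pad bytes are
	 * zeroed below but are not reflected in chunk_hdr->length.
	 */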
475 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
476 	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
477 		list_del_init(&chunk->list);
478 		if (sctp_chunk_is_data(chunk)) {
479 			/* 6.3.1 C4) When data is in flight and when allowed
480 			 * by rule C5, a new RTT measurement MUST be made each
481 			 * round trip.  Furthermore, new RTT measurements
482 			 * SHOULD be made no more than once per round-trip
483 			 * for a given destination transport address.
484 			 */
485 
486 			if (!tp->rto_pending) {
487 				chunk->rtt_in_progress = 1;
488 				tp->rto_pending = 1;
489 			}
490 			has_data = 1;
491 		}
492 
493 		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
494 		if (padding)
495 			memset(skb_put(chunk->skb, padding), 0, padding);
496 
497 		/* If this is the auth chunk that we are adding,
498 		 * store a pointer to where it will be added and put
499 		 * the auth into the packet.
500 		 */
501 		if (chunk == packet->auth)
502 			auth = skb_tail_pointer(nskb);
503 
504 		cksum_buf_len += chunk->skb->len;
505 		memcpy(skb_put(nskb, chunk->skb->len),
506 			       chunk->skb->data, chunk->skb->len);
507 
508 		SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
509 				  "*** Chunk", chunk,
510 				  sctp_cname(SCTP_ST_CHUNK(
511 					  chunk->chunk_hdr->type)),
512 				  chunk->has_tsn ? "TSN" : "No TSN",
513 				  chunk->has_tsn ?
514 				  ntohl(chunk->subh.data_hdr->tsn) : 0,
515 				  "length", ntohs(chunk->chunk_hdr->length),
516 				  "chunk->skb->len", chunk->skb->len,
517 				  "rtt_in_progress", chunk->rtt_in_progress);
518 
519 		/*
520 		 * If this is a control chunk, this is our last
521 		 * reference. Free data chunks after they've been
522 		 * acknowledged or have failed.
523 		 */
524 		if (!sctp_chunk_is_data(chunk))
525 			sctp_chunk_free(chunk);
526 	}
527 
528 	/* SCTP-AUTH, Section 6.2
529 	 *    The sender MUST calculate the MAC as described in RFC2104 [2]
530 	 *    using the hash function H as described by the MAC Identifier and
531 	 *    the shared association key K based on the endpoint pair shared key
532 	 *    described by the shared key identifier.  The 'data' used for the
533 	 *    computation of the AUTH-chunk is given by the AUTH chunk with its
534 	 *    HMAC field set to zero (as shown in Figure 6) followed by all
535 	 *    chunks that are placed after the AUTH chunk in the SCTP packet.
536 	 */
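	/* 'auth' was captured in the copy loop above as a pointer to where
	 * the AUTH chunk starts inside nskb, so the HMAC computed below
	 * covers that chunk (with its HMAC field still zeroed) and every
	 * chunk copied after it.
	 */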
537 	if (auth)
538 		sctp_auth_calculate_hmac(asoc, nskb,
539 					(struct sctp_auth_chunk *)auth,
540 					GFP_ATOMIC);
541 
542 	/* 2) Calculate the Adler-32 checksum of the whole packet,
543 	 *    including the SCTP common header and all the
544 	 *    chunks.
545 	 *
546 	 * Note: Adler-32 is no longer applicable, as it has been replaced
547 	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
548 	 */
549 	if (!sctp_checksum_disable) {
550 		if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
551 			__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
552 
553 			/* 3) Put the resultant value into the checksum field in the
554 			 *    common header, and leave the rest of the bits unchanged.
555 			 */
556 			sh->checksum = sctp_end_cksum(crc32);
557 		} else {
558 			/* no need to seed pseudo checksum for SCTP */
559 			nskb->ip_summed = CHECKSUM_PARTIAL;
560 			nskb->csum_start = (skb_transport_header(nskb) -
561 			                    nskb->head);
562 			nskb->csum_offset = offsetof(struct sctphdr, checksum);
563 		}
564 	}
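	/* In the offloaded case the CRC32c is left for the device to fill in:
	 * CHECKSUM_PARTIAL together with csum_start/csum_offset (set above)
	 * tells the driver where the checksum field sits in the SCTP common
	 * header.
	 */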
565 
566 	/* IP layer ECN support
567 	 * From RFC 2481
568 	 *  "The ECN-Capable Transport (ECT) bit would be set by the
569 	 *   data sender to indicate that the end-points of the
570 	 *   transport protocol are ECN-capable."
571 	 *
572 	 * Now setting the ECT bit all the time, as it should not cause
573 	 * any problems protocol-wise even if our peer ignores it.
574 	 *
575 	 * Note: The IPv6 layer also checks this bit later during
576 	 * transmission.  See IP6_ECN_flow_xmit().
577 	 */
578 	(*tp->af_specific->ecn_capable)(nskb->sk);
579 
580 	/* Set up the IP options.  */
581 	/* BUG: not implemented
582 	 * For v4 this all lives somewhere in sk->sk_opt...
583 	 */
584 
585 	/* Dump that on IP!  */
586 	if (asoc) {
587 		asoc->stats.opackets++;
588 		if (asoc->peer.last_sent_to != tp)
589 			/* Considering the multiple-CPU scenario, this is a
590 			 * more correct place to update last_sent_to.  --xguo
591 			 */
592 			asoc->peer.last_sent_to = tp;
593 	}
594 
595 	if (has_data) {
596 		struct timer_list *timer;
597 		unsigned long timeout;
598 
599 		/* Restart the AUTOCLOSE timer when sending data. */
600 		if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
601 			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
602 			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
603 
604 			if (!mod_timer(timer, jiffies + timeout))
605 				sctp_association_hold(asoc);
606 		}
607 	}
608 
609 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
610 			  nskb->len);
611 
612 	nskb->local_df = packet->ipfragok;
613 	(*tp->af_specific->sctp_xmit)(nskb, tp);
614 
615 out:
616 	sctp_packet_reset(packet);
617 	return err;
618 no_route:
619 	kfree_skb(nskb);
620 	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
621 
622 	/* FIXME: Returning the 'err' will affect all the associations
623 	 * associated with a socket, although only one of the paths of the
624 	 * association is unreachable.
625 	 * The real failure of a transport or association can be passed on
626 	 * to the user via notifications. So setting this error may not be
627 	 * required.
628 	 */
629 	 /* err = -EHOSTUNREACH; */
630 err:
631 	/* Control chunks are unreliable so just drop them.  DATA chunks
632 	 * will get resent or dropped later.
633 	 */
634 
635 	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
636 		list_del_init(&chunk->list);
637 		if (!sctp_chunk_is_data(chunk))
638 			sctp_chunk_free(chunk);
639 	}
640 	goto out;
641 nomem:
642 	err = -ENOMEM;
643 	goto err;
644 }
645 
646 /********************************************************************
647  * 2nd Level Abstractions
648  ********************************************************************/
649 
650 /* This private function checks to see if a chunk can be added */
651 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
652 					   struct sctp_chunk *chunk)
653 {
654 	sctp_xmit_t retval = SCTP_XMIT_OK;
655 	size_t datasize, rwnd, inflight, flight_size;
656 	struct sctp_transport *transport = packet->transport;
657 	struct sctp_association *asoc = transport->asoc;
658 	struct sctp_outq *q = &asoc->outqueue;
659 
660 	/* RFC 2960 6.1  Transmission of DATA Chunks
661 	 *
662 	 * A) At any given time, the data sender MUST NOT transmit new data to
663 	 * any destination transport address if its peer's rwnd indicates
664 	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
665 	 * 6.2.1).  However, regardless of the value of rwnd (including if it
666 	 * is 0), the data sender can always have one DATA chunk in flight to
667 	 * the receiver if allowed by cwnd (see rule B below).  This rule
668 	 * allows the sender to probe for a change in rwnd that the sender
669 	 * missed due to the SACK having been lost in transit from the data
670 	 * receiver to the data sender.
671 	 */
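	/* In practice: if rwnd is zero (or smaller than this chunk) but no
	 * data is outstanding, the chunk is still allowed through as a
	 * window probe; only when data is already in flight does a too-small
	 * rwnd yield SCTP_XMIT_RWND_FULL below.
	 */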
672 
673 	rwnd = asoc->peer.rwnd;
674 	inflight = q->outstanding_bytes;
675 	flight_size = transport->flight_size;
676 
677 	datasize = sctp_data_size(chunk);
678 
679 	if (datasize > rwnd) {
680 		if (inflight > 0) {
681 			/* We have (at least) one data chunk in flight,
682 			 * so we can't fall back to rule 6.1 B).
683 			 */
684 			retval = SCTP_XMIT_RWND_FULL;
685 			goto finish;
686 		}
687 	}
688 
689 	/* RFC 2960 6.1  Transmission of DATA Chunks
690 	 *
691 	 * B) At any given time, the sender MUST NOT transmit new data
692 	 * to a given transport address if it has cwnd or more bytes
693 	 * of data outstanding to that transport address.
694 	 */
695 	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
696 	 *
697 	 * 3) ...
698 	 *    When a Fast Retransmit is being performed the sender SHOULD
699 	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
700 	 */
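	/* Note that a cwnd-limited send is also reported as
	 * SCTP_XMIT_RWND_FULL below; this code does not distinguish rwnd
	 * exhaustion from cwnd exhaustion in its return value.
	 */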
701 	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
702 		if (flight_size >= transport->cwnd) {
703 			retval = SCTP_XMIT_RWND_FULL;
704 			goto finish;
705 		}
706 
707 	/* Nagle's algorithm to solve small-packet problem:
708 	 * Inhibit the sending of new chunks when new outgoing data arrives
709 	 * if any previously transmitted data on the connection remains
710 	 * unacknowledged.
711 	 */
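	/* For illustration (values assumed): with a path MTU of 1500 and an
	 * overhead of 32 bytes, max below is 1468.  If this chunk plus the
	 * remaining queued output (q->out_qlen) still totals less than that
	 * and the message permits delay, the chunk is held back with
	 * SCTP_XMIT_NAGLE_DELAY in the hope of later filling a full-sized
	 * packet.
	 */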
712 	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
713 	    inflight && sctp_state(asoc, ESTABLISHED)) {
714 		unsigned int max = transport->pathmtu - packet->overhead;
715 		unsigned int len = chunk->skb->len + q->out_qlen;
716 
717 		/* Check whether this chunk and all of the rest of the pending
718 		 * data will fit, or delay in hopes of bundling a full-sized
719 		 * packet.
720 		 * Don't delay large message writes that may have been
721 		 * fragmented into small pieces.
722 		 */
723 		if ((len < max) && chunk->msg->can_delay) {
724 			retval = SCTP_XMIT_NAGLE_DELAY;
725 			goto finish;
726 		}
727 	}
728 
729 finish:
730 	return retval;
731 }
732 
733 /* This private function does the bookkeeping needed when adding a DATA chunk */
734 static void sctp_packet_append_data(struct sctp_packet *packet,
735 				struct sctp_chunk *chunk)
736 {
737 	struct sctp_transport *transport = packet->transport;
738 	size_t datasize = sctp_data_size(chunk);
739 	struct sctp_association *asoc = transport->asoc;
740 	u32 rwnd = asoc->peer.rwnd;
741 
742 	/* Keep track of how many bytes are in flight over this transport. */
743 	transport->flight_size += datasize;
744 
745 	/* Keep track of how many bytes are in flight to the receiver. */
746 	asoc->outqueue.outstanding_bytes += datasize;
747 
748 	/* Update our view of the receiver's rwnd. */
749 	if (datasize < rwnd)
750 		rwnd -= datasize;
751 	else
752 		rwnd = 0;
753 
754 	asoc->peer.rwnd = rwnd;
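	/* Example: with a peer rwnd estimate of 1000 bytes, queueing a
	 * 400-byte DATA chunk drops the local estimate to 600; a chunk at or
	 * above the current estimate clamps it to 0 until the next SACK
	 * refreshes it.
	 */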
755 	/* Has been accepted for transmission. */
756 	if (!asoc->peer.prsctp_capable)
757 		chunk->msg->can_abandon = 0;
758 	sctp_chunk_assign_tsn(chunk);
759 	sctp_chunk_assign_ssn(chunk);
760 }
761 
762 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
763 					struct sctp_chunk *chunk,
764 					u16 chunk_len)
765 {
766 	size_t psize;
767 	size_t pmtu;
768 	int too_big;
769 	sctp_xmit_t retval = SCTP_XMIT_OK;
770 
771 	psize = packet->size;
772 	pmtu  = ((packet->transport->asoc) ?
773 		(packet->transport->asoc->pathmtu) :
774 		(packet->transport->pathmtu));
775 
776 	too_big = (psize + chunk_len > pmtu);
777 
778 	/* Decide if we need to fragment or resubmit later. */
779 	if (too_big) {
780 		/* It's OK to fragment at the IP level if any one of the
781 		 * following is true:
782 		 * 	1. The packet is empty (meaning this chunk is greater
783 		 * 	   than the MTU)
784 		 * 	2. The chunk we are adding is a control chunk
785 		 * 	3. The packet doesn't have any data in it yet and data
786 		 * 	   requires authentication.
787 		 */
788 		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
789 		    (!packet->has_data && chunk->auth)) {
790 			/* We no longer do re-fragmentation.
791 			 * Just fragment at the IP layer, if we
792 			 * actually hit this condition
793 			 */
794 			packet->ipfragok = 1;
795 		} else {
796 			retval = SCTP_XMIT_PMTU_FULL;
797 		}
798 	}
799 
800 	return retval;
801 }
802