xref: /openbmc/linux/net/sctp/sm_sideeffect.c (revision 25b892b5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3  * (C) Copyright IBM Corp. 2001, 2004
4  * Copyright (c) 1999 Cisco, Inc.
5  * Copyright (c) 1999-2001 Motorola, Inc.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions work with the state functions in sctp_sm_statefuns.c
10  * to implement the state operations.  These functions implement the
11  * steps which require modifying existing data structures.
12  *
13  * Please send any bug reports or fixes you make to the
14  * email address(es):
15  *    lksctp developers <linux-sctp@vger.kernel.org>
16  *
17  * Written or modified by:
18  *    La Monte H.P. Yarroll <piggy@acm.org>
19  *    Karl Knutson          <karl@athena.chicago.il.us>
20  *    Jon Grimm             <jgrimm@austin.ibm.com>
21  *    Hui Huang		    <hui.huang@nokia.com>
22  *    Dajiang Zhang	    <dajiang.zhang@nokia.com>
23  *    Daisy Chang	    <daisyc@us.ibm.com>
24  *    Sridhar Samudrala	    <sri@us.ibm.com>
25  *    Ardelle Fan	    <ardelle.fan@intel.com>
26  */
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 
30 #include <linux/skbuff.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/ip.h>
34 #include <linux/gfp.h>
35 #include <net/sock.h>
36 #include <net/sctp/sctp.h>
37 #include <net/sctp/sm.h>
38 #include <net/sctp/stream_sched.h>
39 
40 static int sctp_cmd_interpreter(enum sctp_event_type event_type,
41 				union sctp_subtype subtype,
42 				enum sctp_state state,
43 				struct sctp_endpoint *ep,
44 				struct sctp_association *asoc,
45 				void *event_arg,
46 				enum sctp_disposition status,
47 				struct sctp_cmd_seq *commands,
48 				gfp_t gfp);
49 static int sctp_side_effects(enum sctp_event_type event_type,
50 			     union sctp_subtype subtype,
51 			     enum sctp_state state,
52 			     struct sctp_endpoint *ep,
53 			     struct sctp_association **asoc,
54 			     void *event_arg,
55 			     enum sctp_disposition status,
56 			     struct sctp_cmd_seq *commands,
57 			     gfp_t gfp);
58 
59 /********************************************************************
60  * Helper functions
61  ********************************************************************/
62 
63 /* A helper function for delayed processing of INET ECN CE bit. */
64 static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
65 				__u32 lowest_tsn)
66 {
67 	/* Save the TSN away for comparison when we receive CWR */
68 
69 	asoc->last_ecne_tsn = lowest_tsn;
70 	asoc->need_ecne = 1;
71 }
72 
73 /* Helper function for delayed processing of SCTP ECNE chunk.  */
74 /* RFC 2960 Appendix A
75  *
76  * RFC 2481 details a specific bit for a sender to send in
77  * the header of its next outbound TCP segment to indicate to
78  * its peer that it has reduced its congestion window.  This
79  * is termed the CWR bit.  For SCTP the same indication is made
80  * by including the CWR chunk.  This chunk contains one data
81  * element, i.e. the TSN number that was sent in the ECNE chunk.
82  * This element represents the lowest TSN number in the datagram
83  * that was originally marked with the CE bit.
84  */
85 static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
86 						__u32 lowest_tsn,
87 						struct sctp_chunk *chunk)
88 {
89 	struct sctp_chunk *repl;
90 
91 	/* Our previously transmitted packet ran into some congestion
92 	 * so we should take action by reducing cwnd and ssthresh
93 	 * and then ACK our peer that we've done so by
94 	 * sending a CWR.
95 	 */
96 
97 	/* First, try to determine if we want to actually lower
98 	 * our cwnd variables.  Only lower them if the ECNE looks more
99 	 * recent than the last response.
100 	 */
101 	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
102 		struct sctp_transport *transport;
103 
104 		/* Find which transport's congestion variables
105 		 * need to be adjusted.
106 		 */
107 		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
108 
109 		/* Update the congestion variables. */
110 		if (transport)
111 			sctp_transport_lower_cwnd(transport,
112 						  SCTP_LOWER_CWND_ECNE);
113 		asoc->last_cwr_tsn = lowest_tsn;
114 	}
115 
116 	/* Always try to quiet the other end.  In case of lost CWR,
117 	 * resend last_cwr_tsn.
118 	 */
119 	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
120 
121 	/* If we run out of memory, it will look like a lost CWR.  We'll
122 	 * get back in sync eventually.
123 	 */
124 	return repl;
125 }
126 
127 /* Helper function to do delayed processing of ECN CWR chunk.  */
128 static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
129 				 __u32 lowest_tsn)
130 {
131 	/* Turn off ECNE getting auto-prepended to every outgoing
132 	 * packet
133 	 */
134 	asoc->need_ecne = 0;
135 }
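
/*
 * Taken together, the three ECN helpers above implement the RFC 2960
 * Appendix A exchange: a CE mark seen on received data is remembered and
 * echoed back (sctp_do_ecn_ce_work() records the TSN and sets need_ecne so
 * ECNE is bundled into outgoing packets), an incoming ECNE lowers
 * cwnd/ssthresh on the transport that carried the marked TSN and is
 * answered with a CWR, and an incoming CWR clears need_ecne again.
 */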
136 
137 /* Generate SACK if necessary.  We call this at the end of a packet.  */
138 static int sctp_gen_sack(struct sctp_association *asoc, int force,
139 			 struct sctp_cmd_seq *commands)
140 {
141 	struct sctp_transport *trans = asoc->peer.last_data_from;
142 	__u32 ctsn, max_tsn_seen;
143 	struct sctp_chunk *sack;
144 	int error = 0;
145 
146 	if (force ||
147 	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
148 	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
149 		asoc->peer.sack_needed = 1;
150 
151 	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
152 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
153 
154 	/* From 12.2 Parameters necessary per association (i.e. the TCB):
155 	 *
156 	 * Ack State : This flag indicates if the next received packet
157 	 * 	     : is to be responded to with a SACK. ...
158 	 *	     : When DATA chunks are out of order, SACKs
159 	 *           : are not delayed (see Section 6).
160 	 *
161 	 * [This is actually not mentioned in Section 6, but we
162 	 * implement it here anyway. --piggy]
163 	 */
164 	if (max_tsn_seen != ctsn)
165 		asoc->peer.sack_needed = 1;
166 
167 	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
168 	 *
169 	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
170 	 * an acknowledgement SHOULD be generated for at least every
171 	 * second packet (not every second DATA chunk) received, and
172 	 * SHOULD be generated within 200 ms of the arrival of any
173 	 * unacknowledged DATA chunk. ...
174 	 */
175 	if (!asoc->peer.sack_needed) {
176 		asoc->peer.sack_cnt++;
177 
178 		/* Set the SACK delay timeout based on the
179 		 * SACK delay for the last transport
180 		 * SACK delay for the last transport that
181 		 * data was received from, or the default
182 		 */
183 		if (trans) {
184 			/* We will need a SACK for the next packet.  */
185 			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
186 				asoc->peer.sack_needed = 1;
187 
188 			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
189 				trans->sackdelay;
190 		} else {
191 			/* We will need a SACK for the next packet.  */
192 			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
193 				asoc->peer.sack_needed = 1;
194 
195 			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
196 				asoc->sackdelay;
197 		}
198 
199 		/* Restart the SACK timer. */
200 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
201 				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
202 	} else {
203 		__u32 old_a_rwnd = asoc->a_rwnd;
204 
205 		asoc->a_rwnd = asoc->rwnd;
206 		sack = sctp_make_sack(asoc);
207 		if (!sack) {
208 			asoc->a_rwnd = old_a_rwnd;
209 			goto nomem;
210 		}
211 
212 		asoc->peer.sack_needed = 0;
213 		asoc->peer.sack_cnt = 0;
214 
215 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
216 
217 		/* Stop the SACK timer.  */
218 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
219 				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
220 	}
221 
222 	return error;
223 nomem:
224 	error = -ENOMEM;
225 	return error;
226 }
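
/*
 * In short, sctp_gen_sack() implements the delayed-SACK policy quoted above:
 * a SACK is built immediately when it is forced, when delayed SACKs are
 * disabled for the source transport (or association), or when DATA has
 * arrived out of order; otherwise the per-packet counter is bumped and the
 * SACK timer is (re)armed with the transport's (or association's) SACK
 * delay, so that roughly every sackfreq'th packet is still acknowledged.
 */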
227 
228 /* When the T3-RTX timer expires, it calls this function to create the
229  * relevant state machine event.
230  */
231 void sctp_generate_t3_rtx_event(struct timer_list *t)
232 {
233 	struct sctp_transport *transport =
234 		from_timer(transport, t, T3_rtx_timer);
235 	struct sctp_association *asoc = transport->asoc;
236 	struct sock *sk = asoc->base.sk;
237 	struct net *net = sock_net(sk);
238 	int error;
239 
240 	/* Check whether a task is in the sock.  */
241 
242 	bh_lock_sock(sk);
243 	if (sock_owned_by_user(sk)) {
244 		pr_debug("%s: sock is busy\n", __func__);
245 
246 		/* Try again later.  */
247 		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
248 			sctp_transport_hold(transport);
249 		goto out_unlock;
250 	}
251 
252 	/* Run through the state machine.  */
253 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
254 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
255 			   asoc->state,
256 			   asoc->ep, asoc,
257 			   transport, GFP_ATOMIC);
258 
259 	if (error)
260 		sk->sk_err = -error;
261 
262 out_unlock:
263 	bh_unlock_sock(sk);
264 	sctp_transport_put(transport);
265 }
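
/*
 * Note the deferral pattern shared by the timer handlers in this file: if
 * the socket is currently owned by a process context (sock_owned_by_user()),
 * the handler does not run the state machine from softirq context.  Instead
 * it re-arms the timer a short interval (HZ/20, i.e. ~50ms) into the future,
 * taking an extra reference on the transport or association when mod_timer()
 * reports the timer was not already pending, and retries later.
 */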
266 
267 /* This is a common interface for producing timeout events.  It works
268  * for timeouts which use the association as their parameter.
269  */
270 static void sctp_generate_timeout_event(struct sctp_association *asoc,
271 					enum sctp_event_timeout timeout_type)
272 {
273 	struct sock *sk = asoc->base.sk;
274 	struct net *net = sock_net(sk);
275 	int error = 0;
276 
277 	bh_lock_sock(sk);
278 	if (sock_owned_by_user(sk)) {
279 		pr_debug("%s: sock is busy: timer %d\n", __func__,
280 			 timeout_type);
281 
282 		/* Try again later.  */
283 		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
284 			sctp_association_hold(asoc);
285 		goto out_unlock;
286 	}
287 
288 	/* Is this association really dead and just waiting around for
289 	 * the timer to let go of the reference?
290 	 */
291 	if (asoc->base.dead)
292 		goto out_unlock;
293 
294 	/* Run through the state machine.  */
295 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
296 			   SCTP_ST_TIMEOUT(timeout_type),
297 			   asoc->state, asoc->ep, asoc,
298 			   (void *)timeout_type, GFP_ATOMIC);
299 
300 	if (error)
301 		sk->sk_err = -error;
302 
303 out_unlock:
304 	bh_unlock_sock(sk);
305 	sctp_association_put(asoc);
306 }
307 
308 static void sctp_generate_t1_cookie_event(struct timer_list *t)
309 {
310 	struct sctp_association *asoc =
311 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
312 
313 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
314 }
315 
316 static void sctp_generate_t1_init_event(struct timer_list *t)
317 {
318 	struct sctp_association *asoc =
319 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);
320 
321 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
322 }
323 
324 static void sctp_generate_t2_shutdown_event(struct timer_list *t)
325 {
326 	struct sctp_association *asoc =
327 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);
328 
329 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
330 }
331 
332 static void sctp_generate_t4_rto_event(struct timer_list *t)
333 {
334 	struct sctp_association *asoc =
335 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);
336 
337 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
338 }
339 
340 static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
341 {
342 	struct sctp_association *asoc =
343 		from_timer(asoc, t,
344 			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);
345 
346 	sctp_generate_timeout_event(asoc,
347 				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
348 
349 } /* sctp_generate_t5_shutdown_guard_event() */
350 
351 static void sctp_generate_autoclose_event(struct timer_list *t)
352 {
353 	struct sctp_association *asoc =
354 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);
355 
356 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
357 }
358 
359 /* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
360  * sure that the transport is still valid.
361  */
362 void sctp_generate_heartbeat_event(struct timer_list *t)
363 {
364 	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
365 	struct sctp_association *asoc = transport->asoc;
366 	struct sock *sk = asoc->base.sk;
367 	struct net *net = sock_net(sk);
368 	u32 elapsed, timeout;
369 	int error = 0;
370 
371 	bh_lock_sock(sk);
372 	if (sock_owned_by_user(sk)) {
373 		pr_debug("%s: sock is busy\n", __func__);
374 
375 		/* Try again later.  */
376 		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
377 			sctp_transport_hold(transport);
378 		goto out_unlock;
379 	}
380 
381 	/* Check if we should still send the heartbeat or reschedule */
382 	elapsed = jiffies - transport->last_time_sent;
383 	timeout = sctp_transport_timeout(transport);
384 	if (elapsed < timeout) {
385 		elapsed = timeout - elapsed;
386 		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
387 			sctp_transport_hold(transport);
388 		goto out_unlock;
389 	}
390 
391 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
392 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
393 			   asoc->state, asoc->ep, asoc,
394 			   transport, GFP_ATOMIC);
395 
396 	if (error)
397 		sk->sk_err = -error;
398 
399 out_unlock:
400 	bh_unlock_sock(sk);
401 	sctp_transport_put(transport);
402 }
403 
404 /* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
405  * the correct state machine transition that will close the association.
406  */
407 void sctp_generate_proto_unreach_event(struct timer_list *t)
408 {
409 	struct sctp_transport *transport =
410 		from_timer(transport, t, proto_unreach_timer);
411 	struct sctp_association *asoc = transport->asoc;
412 	struct sock *sk = asoc->base.sk;
413 	struct net *net = sock_net(sk);
414 
415 	bh_lock_sock(sk);
416 	if (sock_owned_by_user(sk)) {
417 		pr_debug("%s: sock is busy\n", __func__);
418 
419 		/* Try again later.  */
420 		if (!mod_timer(&transport->proto_unreach_timer,
421 				jiffies + (HZ/20)))
422 			sctp_transport_hold(transport);
423 		goto out_unlock;
424 	}
425 
426 	/* Is this structure just waiting around for us to actually
427 	 * get destroyed?
428 	 */
429 	if (asoc->base.dead)
430 		goto out_unlock;
431 
432 	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
433 		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
434 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
435 
436 out_unlock:
437 	bh_unlock_sock(sk);
438 	sctp_transport_put(transport);
439 }
440 
441  /* Handle the timeout of the RE-CONFIG timer. */
442 void sctp_generate_reconf_event(struct timer_list *t)
443 {
444 	struct sctp_transport *transport =
445 		from_timer(transport, t, reconf_timer);
446 	struct sctp_association *asoc = transport->asoc;
447 	struct sock *sk = asoc->base.sk;
448 	struct net *net = sock_net(sk);
449 	int error = 0;
450 
451 	bh_lock_sock(sk);
452 	if (sock_owned_by_user(sk)) {
453 		pr_debug("%s: sock is busy\n", __func__);
454 
455 		/* Try again later.  */
456 		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
457 			sctp_transport_hold(transport);
458 		goto out_unlock;
459 	}
460 
461 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
462 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
463 			   asoc->state, asoc->ep, asoc,
464 			   transport, GFP_ATOMIC);
465 
466 	if (error)
467 		sk->sk_err = -error;
468 
469 out_unlock:
470 	bh_unlock_sock(sk);
471 	sctp_transport_put(transport);
472 }
473 
474 /* Handle the timeout of the probe timer. */
475 void sctp_generate_probe_event(struct timer_list *t)
476 {
477 	struct sctp_transport *transport = from_timer(transport, t, probe_timer);
478 	struct sctp_association *asoc = transport->asoc;
479 	struct sock *sk = asoc->base.sk;
480 	struct net *net = sock_net(sk);
481 	int error = 0;
482 
483 	bh_lock_sock(sk);
484 	if (sock_owned_by_user(sk)) {
485 		pr_debug("%s: sock is busy\n", __func__);
486 
487 		/* Try again later.  */
488 		if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
489 			sctp_transport_hold(transport);
490 		goto out_unlock;
491 	}
492 
493 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
494 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
495 			   asoc->state, asoc->ep, asoc,
496 			   transport, GFP_ATOMIC);
497 
498 	if (error)
499 		sk->sk_err = -error;
500 
501 out_unlock:
502 	bh_unlock_sock(sk);
503 	sctp_transport_put(transport);
504 }
505 
506 /* Inject a SACK Timeout event into the state machine.  */
507 static void sctp_generate_sack_event(struct timer_list *t)
508 {
509 	struct sctp_association *asoc =
510 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);
511 
512 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
513 }
514 
515 sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
516 	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
517 	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
518 	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
519 	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
520 	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
521 	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
522 	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
523 					sctp_generate_t5_shutdown_guard_event,
524 	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
525 	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
526 	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
527 	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
528 };
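
/*
 * The NULL entries above are not missing handlers: the T3-RTX, HEARTBEAT and
 * RECONF timeouts are driven by per-transport timers rather than by the
 * association's timer array, and are dispatched through
 * sctp_generate_t3_rtx_event(), sctp_generate_heartbeat_event() and
 * sctp_generate_reconf_event() above; the probe timer
 * (sctp_generate_probe_event()) is handled the same way.
 */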
529 
530 
531 /* RFC 2960 8.2 Path Failure Detection
532  *
533  * When its peer endpoint is multi-homed, an endpoint should keep an
534  * error counter for each of the destination transport addresses of the
535  * peer endpoint.
536  *
537  * Each time the T3-rtx timer expires on any address, or when a
538  * HEARTBEAT sent to an idle address is not acknowledged within an RTO,
539  * the error counter of that destination address will be incremented.
540  * When the value in the error counter exceeds the protocol parameter
541  * 'Path.Max.Retrans' of that destination address, the endpoint should
542  * mark the destination transport address as inactive, and a
543  * notification SHOULD be sent to the upper layer.
544  *
545  */
546 static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
547 					 struct sctp_association *asoc,
548 					 struct sctp_transport *transport,
549 					 int is_hb)
550 {
551 	/* The check for the association's overall error counter exceeding the
552 	 * threshold is done in the state function.
553 	 */
554 	/* We are here due to a timer expiration.  If the timer was
555 	 * not a HEARTBEAT, then normal error tracking is done.
556 	 * If the timer was a heartbeat, we only increment error counts
557 	 * when we already have an outstanding HEARTBEAT that has not
558 	 * been acknowledged.
559 	 * Additionally, some transport states inhibit error increments.
560 	 */
561 	if (!is_hb) {
562 		asoc->overall_error_count++;
563 		if (transport->state != SCTP_INACTIVE)
564 			transport->error_count++;
565 	 } else if (transport->hb_sent) {
566 		if (transport->state != SCTP_UNCONFIRMED)
567 			asoc->overall_error_count++;
568 		if (transport->state != SCTP_INACTIVE)
569 			transport->error_count++;
570 	}
571 
572 	/* If the transport error count is greater than the pf_retrans
573 	 * threshold, and less than pathmaxrxt, and if the current state
574 	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
575 	 * see SCTP Quick Failover Draft, section 5.1
576 	 */
577 	if (asoc->base.net->sctp.pf_enable &&
578 	    transport->state == SCTP_ACTIVE &&
579 	    transport->error_count < transport->pathmaxrxt &&
580 	    transport->error_count > transport->pf_retrans) {
581 
582 		sctp_assoc_control_transport(asoc, transport,
583 					     SCTP_TRANSPORT_PF,
584 					     0);
585 
586 		/* Update the hb timer to resend a heartbeat every rto */
587 		sctp_transport_reset_hb_timer(transport);
588 	}
589 
590 	if (transport->state != SCTP_INACTIVE &&
591 	    (transport->error_count > transport->pathmaxrxt)) {
592 		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
593 			 __func__, asoc, &transport->ipaddr.sa);
594 
595 		sctp_assoc_control_transport(asoc, transport,
596 					     SCTP_TRANSPORT_DOWN,
597 					     SCTP_FAILED_THRESHOLD);
598 	}
599 
600 	if (transport->error_count > transport->ps_retrans &&
601 	    asoc->peer.primary_path == transport &&
602 	    asoc->peer.active_path != transport)
603 		sctp_assoc_set_primary(asoc, asoc->peer.active_path);
604 
605 	/* E2) For the destination address for which the timer
606 	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
607 	 * maximum value discussed in rule C7 above (RTO.max) may be
608 	 * used to provide an upper bound to this doubling operation.
609 	 *
610 	 * Special Case:  the first HB doesn't trigger exponential backoff.
611 	 * The first unacknowledged HB triggers it.  We do this with a flag
612 	 * that indicates that we have an outstanding HB.
613 	 */
614 	if (!is_hb || transport->hb_sent) {
615 		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
616 		sctp_max_rto(asoc, transport);
617 	}
618 }
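
/*
 * For illustration, assuming RFC 4960's suggested RTO.Initial of 3s and
 * RTO.Max of 60s: a transport whose RTO currently sits at 3s would back off
 * through 6s, 12s, 24s, 48s and then be capped at 60s across successive
 * strikes, while error_count climbs past pf_retrans, pathmaxrxt and
 * ps_retrans and triggers the PF, DOWN and primary-path-switch transitions
 * handled earlier in the function.  The actual values depend on the
 * association's configured rto parameters and the measured RTT.
 */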
619 
620 /* Worker routine to handle INIT command failure.  */
621 static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
622 				 struct sctp_association *asoc,
623 				 unsigned int error)
624 {
625 	struct sctp_ulpevent *event;
626 
627 	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
628 						(__u16)error, 0, 0, NULL,
629 						GFP_ATOMIC);
630 
631 	if (event)
632 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
633 				SCTP_ULPEVENT(event));
634 
635 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
636 			SCTP_STATE(SCTP_STATE_CLOSED));
637 
638 	/* SEND_FAILED sent later when cleaning up the association. */
639 	asoc->outqueue.error = error;
640 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
641 }
642 
643 /* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
644 static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
645 				  struct sctp_association *asoc,
646 				  enum sctp_event_type event_type,
647 				  union sctp_subtype subtype,
648 				  struct sctp_chunk *chunk,
649 				  unsigned int error)
650 {
651 	struct sctp_ulpevent *event;
652 	struct sctp_chunk *abort;
653 
654 	/* Cancel any partial delivery in progress. */
655 	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
656 
657 	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
658 		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
659 						(__u16)error, 0, 0, chunk,
660 						GFP_ATOMIC);
661 	else
662 		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
663 						(__u16)error, 0, 0, NULL,
664 						GFP_ATOMIC);
665 	if (event)
666 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
667 				SCTP_ULPEVENT(event));
668 
669 	if (asoc->overall_error_count >= asoc->max_retrans) {
670 		abort = sctp_make_violation_max_retrans(asoc, chunk);
671 		if (abort)
672 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
673 					SCTP_CHUNK(abort));
674 	}
675 
676 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
677 			SCTP_STATE(SCTP_STATE_CLOSED));
678 
679 	/* SEND_FAILED sent later when cleaning up the association. */
680 	asoc->outqueue.error = error;
681 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
682 }
683 
684 /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
685  * inside the cookie).  In reality, this is only used for INIT-ACK processing
686  * since all other cases use "temporary" associations and can do all
687  * their work in statefuns directly.
688  */
689 static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
690 				 struct sctp_association *asoc,
691 				 struct sctp_chunk *chunk,
692 				 struct sctp_init_chunk *peer_init,
693 				 gfp_t gfp)
694 {
695 	int error;
696 
697 	/* We only process the init as a side effect in a single
698 	 * case.   This is when we process the INIT-ACK.   If we
699 	 * fail during INIT processing (due to malloc problems),
700 	 * just return the error and stop processing the stack.
701 	 */
702 	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
703 		error = -ENOMEM;
704 	else
705 		error = 0;
706 
707 	return error;
708 }
709 
710 /* Helper function to break out starting up of heartbeat timers.  */
711 static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
712 				     struct sctp_association *asoc)
713 {
714 	struct sctp_transport *t;
715 
716 	/* Start a heartbeat timer for each transport on the association.
717 	 * Hold a reference on the transport to make sure none of
718 	 * the needed data structures go away.
719 	 */
720 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
721 		sctp_transport_reset_hb_timer(t);
722 }
723 
724 static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
725 				    struct sctp_association *asoc)
726 {
727 	struct sctp_transport *t;
728 
729 	/* Stop all heartbeat timers. */
730 
731 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
732 			transports) {
733 		if (del_timer(&t->hb_timer))
734 			sctp_transport_put(t);
735 	}
736 }
737 
738 /* Helper function to stop any pending T3-RTX timers */
739 static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
740 					struct sctp_association *asoc)
741 {
742 	struct sctp_transport *t;
743 
744 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
745 			transports) {
746 		if (del_timer(&t->T3_rtx_timer))
747 			sctp_transport_put(t);
748 	}
749 }
750 
751 
752 /* Helper function to handle the reception of a HEARTBEAT ACK.  */
753 static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
754 				  struct sctp_association *asoc,
755 				  struct sctp_transport *t,
756 				  struct sctp_chunk *chunk)
757 {
758 	struct sctp_sender_hb_info *hbinfo;
759 	int was_unconfirmed = 0;
760 
761 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
762 	 * HEARTBEAT should clear the error counter of the destination
763 	 * transport address to which the HEARTBEAT was sent.
764 	 */
765 	t->error_count = 0;
766 
767 	/*
768 	 * Although RFC4960 specifies that the overall error count must
769 	 * be cleared when a HEARTBEAT ACK is received, we make an
770 	 * exception while in SHUTDOWN PENDING. If the peer keeps its
771 	 * window shut forever, we may never be able to transmit our
772 	 * outstanding data and rely on the retransmission limit being reached
773 	 * to shut down the association.
774 	 */
775 	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
776 		t->asoc->overall_error_count = 0;
777 
778 	/* Clear the hb_sent flag to signal that we had a good
779 	 * acknowledgement.
780 	 */
781 	t->hb_sent = 0;
782 
783 	/* Mark the destination transport address as active if it is not so
784 	 * marked.
785 	 */
786 	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
787 		was_unconfirmed = 1;
788 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
789 					     SCTP_HEARTBEAT_SUCCESS);
790 	}
791 
792 	if (t->state == SCTP_PF)
793 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
794 					     SCTP_HEARTBEAT_SUCCESS);
795 
796 	/* HB-ACK was received for the proper HB.  Consider this
797 	 * forward progress.
798 	 */
799 	if (t->dst)
800 		sctp_transport_dst_confirm(t);
801 
802 	/* The receiver of the HEARTBEAT ACK should also perform an
803 	 * RTT measurement for that destination transport address
804 	 * using the time value carried in the HEARTBEAT ACK chunk.
805 	 * If the transport's rto_pending variable has been cleared,
806 	 * it was most likely due to a retransmit.  However, we want
807 	 * to re-enable it to properly update the rto.
808 	 */
809 	if (t->rto_pending == 0)
810 		t->rto_pending = 1;
811 
812 	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
813 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
814 
815 	/* Update the heartbeat timer.  */
816 	sctp_transport_reset_hb_timer(t);
817 
818 	if (was_unconfirmed && asoc->peer.transport_count == 1)
819 		sctp_transport_immediate_rtx(t);
820 }
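
/*
 * Receiving a HEARTBEAT ACK therefore acts as proof of reachability: error
 * counters are cleared (the overall count only outside SHUTDOWN PENDING, as
 * noted above), an INACTIVE/UNCONFIRMED or PF transport is brought back UP,
 * the cached route is confirmed, rto_pending is re-enabled, and the echoed
 * timestamp carried in the HEARTBEAT ACK is fed into the RTO calculation via
 * sctp_transport_update_rto().
 */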
821 
822 
823 /* Helper function to process the SACK command.  */
824 static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
825 				 struct sctp_association *asoc,
826 				 struct sctp_chunk *chunk)
827 {
828 	int err = 0;
829 
830 	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
831 		/* There are no more TSNs awaiting SACK.  */
832 		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
833 				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
834 				 asoc->state, asoc->ep, asoc, NULL,
835 				 GFP_ATOMIC);
836 	}
837 
838 	return err;
839 }
840 
841 /* Helper function to set the timeout value for the T2-SHUTDOWN timer and to set
842  * the transport for a shutdown chunk.
843  */
844 static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
845 			      struct sctp_association *asoc,
846 			      struct sctp_chunk *chunk)
847 {
848 	struct sctp_transport *t;
849 
850 	if (chunk->transport)
851 		t = chunk->transport;
852 	else {
853 		t = sctp_assoc_choose_alter_transport(asoc,
854 					      asoc->shutdown_last_sent_to);
855 		chunk->transport = t;
856 	}
857 	asoc->shutdown_last_sent_to = t;
858 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
859 }
860 
861 /* Helper function to change the state of an association. */
862 static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
863 			       struct sctp_association *asoc,
864 			       enum sctp_state state)
865 {
866 	struct sock *sk = asoc->base.sk;
867 
868 	asoc->state = state;
869 
870 	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
871 
872 	if (sctp_style(sk, TCP)) {
873 		/* Change the sk->sk_state of a TCP-style socket that has
874 		 * successfully completed a connect() call.
875 		 */
876 		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
877 			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
878 
879 		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
880 		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
881 		    sctp_sstate(sk, ESTABLISHED)) {
882 			inet_sk_set_state(sk, SCTP_SS_CLOSING);
883 			sk->sk_shutdown |= RCV_SHUTDOWN;
884 		}
885 	}
886 
887 	if (sctp_state(asoc, COOKIE_WAIT)) {
888 		/* Reset init timeouts since they may have been
889 		 * increased due to timer expirations.
890 		 */
891 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
892 						asoc->rto_initial;
893 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
894 						asoc->rto_initial;
895 	}
896 
897 	if (sctp_state(asoc, ESTABLISHED)) {
898 		kfree(asoc->peer.cookie);
899 		asoc->peer.cookie = NULL;
900 	}
901 
902 	if (sctp_state(asoc, ESTABLISHED) ||
903 	    sctp_state(asoc, CLOSED) ||
904 	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
905 		/* Wake up any processes waiting in the asoc's wait queue in
906 		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
907 		 */
908 		if (waitqueue_active(&asoc->wait))
909 			wake_up_interruptible(&asoc->wait);
910 
911 		/* Wake up any processes waiting in the sk's sleep queue of
912 		 * a TCP-style or UDP-style peeled-off socket in
913 		 * sctp_wait_for_accept() or sctp_wait_for_packet().
914 		 * For a UDP-style socket, the waiters are woken up by the
915 		 * notifications.
916 		 */
917 		if (!sctp_style(sk, UDP))
918 			sk->sk_state_change(sk);
919 	}
920 
921 	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
922 	    !sctp_outq_is_empty(&asoc->outqueue))
923 		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
924 }
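
/*
 * Beyond recording asoc->state, the helper above keeps the rest of the stack
 * in sync: a TCP-style socket is moved to ESTABLISHED on a completed
 * connect() and to CLOSING (with RCV_SHUTDOWN set) once a SHUTDOWN arrives,
 * the T1 timeouts are reset to rto_initial when entering COOKIE_WAIT, the
 * cached peer cookie is freed once ESTABLISHED, sleepers on the association
 * and socket wait queues are woken, and the outqueue is uncorked when
 * entering SHUTDOWN_PENDING with data still queued.
 */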
925 
926 /* Helper function to delete an association. */
927 static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
928 				struct sctp_association *asoc)
929 {
930 	struct sock *sk = asoc->base.sk;
931 
932 	/* If it is a non-temporary association belonging to a TCP-style
933 	 * listening socket that is not closed, do not free it so that accept()
934 	 * can pick it up later.
935 	 */
936 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
937 	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
938 		return;
939 
940 	sctp_association_free(asoc);
941 }
942 
943 /*
944  * ADDIP Section 4.1 ASCONF Chunk Procedures
945  * A4) Start a T-4 RTO timer, using the RTO value of the selected
946  * destination address (we use active path instead of primary path just
947  * because primary path may be inactive).
948  */
949 static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
950 			      struct sctp_association *asoc,
951 			      struct sctp_chunk *chunk)
952 {
953 	struct sctp_transport *t;
954 
955 	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
956 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
957 	chunk->transport = t;
958 }
959 
960 /* Process an incoming Operation Error Chunk. */
961 static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
962 				   struct sctp_association *asoc,
963 				   struct sctp_chunk *chunk)
964 {
965 	struct sctp_errhdr *err_hdr;
966 	struct sctp_ulpevent *ev;
967 
968 	while (chunk->chunk_end > chunk->skb->data) {
969 		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
970 
971 		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
972 						     GFP_ATOMIC);
973 		if (!ev)
974 			return;
975 
976 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
977 
978 		switch (err_hdr->cause) {
979 		case SCTP_ERROR_UNKNOWN_CHUNK:
980 		{
981 			struct sctp_chunkhdr *unk_chunk_hdr;
982 
983 			unk_chunk_hdr = (struct sctp_chunkhdr *)
984 							err_hdr->variable;
985 			switch (unk_chunk_hdr->type) {
986 			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
987 			 * an ERROR chunk reporting that it did not recognize
988 			 * the ASCONF chunk type, the sender of the ASCONF MUST
989 			 * NOT send any further ASCONF chunks and MUST stop its
990 			 * T-4 timer.
991 			 */
992 			case SCTP_CID_ASCONF:
993 				if (asoc->peer.asconf_capable == 0)
994 					break;
995 
996 				asoc->peer.asconf_capable = 0;
997 				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
998 					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
999 				break;
1000 			default:
1001 				break;
1002 			}
1003 			break;
1004 		}
1005 		default:
1006 			break;
1007 		}
1008 	}
1009 }
1010 
1011 /* Helper function to remove the association's non-primary peer
1012  * transports.
1013  */
1014 static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
1015 {
1016 	struct sctp_transport *t;
1017 	struct list_head *temp;
1018 	struct list_head *pos;
1019 
1020 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1021 		t = list_entry(pos, struct sctp_transport, transports);
1022 		if (!sctp_cmp_addr_exact(&t->ipaddr,
1023 					 &asoc->peer.primary_addr)) {
1024 			sctp_assoc_rm_peer(asoc, t);
1025 		}
1026 	}
1027 }
1028 
1029 /* Helper function to set sk_err on a 1-1 style socket. */
1030 static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
1031 {
1032 	struct sock *sk = asoc->base.sk;
1033 
1034 	if (!sctp_style(sk, UDP))
1035 		sk->sk_err = error;
1036 }
1037 
1038 /* Helper function to generate an association change event */
1039 static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
1040 				  struct sctp_association *asoc,
1041 				  u8 state)
1042 {
1043 	struct sctp_ulpevent *ev;
1044 
1045 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
1046 					    asoc->c.sinit_num_ostreams,
1047 					    asoc->c.sinit_max_instreams,
1048 					    NULL, GFP_ATOMIC);
1049 	if (ev)
1050 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1051 }
1052 
1053 static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
1054 				  struct sctp_association *asoc)
1055 {
1056 	struct sctp_ulpevent *ev;
1057 
1058 	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
1059 	if (ev)
1060 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1061 }
1062 
1063 /* Helper function to generate an adaptation indication event */
1064 static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
1065 				    struct sctp_association *asoc)
1066 {
1067 	struct sctp_ulpevent *ev;
1068 
1069 	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
1070 
1071 	if (ev)
1072 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1073 }
1074 
1075 
1076 static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
1077 				     enum sctp_event_timeout timer,
1078 				     char *name)
1079 {
1080 	struct sctp_transport *t;
1081 
1082 	t = asoc->init_last_sent_to;
1083 	asoc->init_err_counter++;
1084 
1085 	if (t->init_sent_count > (asoc->init_cycle + 1)) {
1086 		asoc->timeouts[timer] *= 2;
1087 		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
1088 			asoc->timeouts[timer] = asoc->max_init_timeo;
1089 		}
1090 		asoc->init_cycle++;
1091 
1092 		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
1093 			 " cycle:%d timeout:%ld\n", __func__, name,
1094 			 asoc->init_err_counter, asoc->init_cycle,
1095 			 asoc->timeouts[timer]);
1096 	}
1097 
1098 }
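
/*
 * In other words, init_err_counter is bumped on every call, but the T1
 * (INIT/COOKIE) timeout only doubles once all transports have been tried at
 * the current value: the transport the last INIT/COOKIE-ECHO was sent to
 * must have init_sent_count > init_cycle + 1, and the doubled timeout is
 * clamped to max_init_timeo before init_cycle is incremented.
 */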
1099 
1100 /* Send the whole message, chunk by chunk, to the outqueue.
1101  * This way the whole message is queued up and bundling is
1102  * encouraged for small fragments.
1103  */
1104 static void sctp_cmd_send_msg(struct sctp_association *asoc,
1105 			      struct sctp_datamsg *msg, gfp_t gfp)
1106 {
1107 	struct sctp_chunk *chunk;
1108 
1109 	list_for_each_entry(chunk, &msg->chunks, frag_list)
1110 		sctp_outq_tail(&asoc->outqueue, chunk, gfp);
1111 
1112 	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
1113 }
1114 
1115 
1116 /* These three macros allow us to pull the debugging code out of the
1117  * main flow of sctp_do_sm() to keep attention focused on the real
1118  * functionality there.
1119  */
1120 #define debug_pre_sfn() \
1121 	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
1122 		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
1123 		 asoc, sctp_state_tbl[state], state_fn->name)
1124 
1125 #define debug_post_sfn() \
1126 	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
1127 		 sctp_status_tbl[status])
1128 
1129 #define debug_post_sfx() \
1130 	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
1131 		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
1132 		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
1133 
1134 /*
1135  * This is the master state machine processing function.
1136  *
1137  * If you want to understand all of lksctp, this is a
1138  * good place to start.
1139  */
1140 int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
1141 	       union sctp_subtype subtype, enum sctp_state state,
1142 	       struct sctp_endpoint *ep, struct sctp_association *asoc,
1143 	       void *event_arg, gfp_t gfp)
1144 {
1145 	typedef const char *(printfn_t)(union sctp_subtype);
1146 	static printfn_t *table[] = {
1147 		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
1148 	};
1149 	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
1150 	const struct sctp_sm_table_entry *state_fn;
1151 	struct sctp_cmd_seq commands;
1152 	enum sctp_disposition status;
1153 	int error = 0;
1154 
1155 	/* Look up the state function, run it, and then process the
1156 	 * side effects.  These three steps are the heart of lksctp.
1157 	 */
1158 	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
1159 
1160 	sctp_init_cmd_seq(&commands);
1161 
1162 	debug_pre_sfn();
1163 	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
1164 	debug_post_sfn();
1165 
1166 	error = sctp_side_effects(event_type, subtype, state,
1167 				  ep, &asoc, event_arg, status,
1168 				  &commands, gfp);
1169 	debug_post_sfx();
1170 
1171 	return error;
1172 }
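
/*
 * Every event - an inbound chunk, a timeout, a primitive from the socket
 * layer or an "other" event - funnels through sctp_do_sm(): look up the
 * state function for (event_type, state, subtype), run it to fill a
 * sctp_cmd_seq, then let sctp_side_effects()/sctp_cmd_interpreter() apply
 * the queued commands.  The timer handlers earlier in this file are typical
 * callers, e.g.:
 *
 *	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 *			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
 *			   asoc->state, asoc->ep, asoc,
 *			   transport, GFP_ATOMIC);
 */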
1173 
1174 /*****************************************************************
1175  * This is the master state-function side-effect processing function.
1176  *****************************************************************/
1177 static int sctp_side_effects(enum sctp_event_type event_type,
1178 			     union sctp_subtype subtype,
1179 			     enum sctp_state state,
1180 			     struct sctp_endpoint *ep,
1181 			     struct sctp_association **asoc,
1182 			     void *event_arg,
1183 			     enum sctp_disposition status,
1184 			     struct sctp_cmd_seq *commands,
1185 			     gfp_t gfp)
1186 {
1187 	int error;
1188 
1189 	/* FIXME - Most of the dispositions left today would be categorized
1190 	 * as "exceptional" dispositions.  For those dispositions, it
1191 	 * may not be proper to run through any of the commands at all.
1192 	 * For example, the command interpreter might be run only with
1193 	 * disposition SCTP_DISPOSITION_CONSUME.
1194 	 */
1195 	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
1196 					       ep, *asoc,
1197 					       event_arg, status,
1198 					       commands, gfp)))
1199 		goto bail;
1200 
1201 	switch (status) {
1202 	case SCTP_DISPOSITION_DISCARD:
1203 		pr_debug("%s: ignored sctp protocol event - state:%d, "
1204 			 "event_type:%d, event_id:%d\n", __func__, state,
1205 			 event_type, subtype.chunk);
1206 		break;
1207 
1208 	case SCTP_DISPOSITION_NOMEM:
1209 		/* We ran out of memory, so we need to discard this
1210 		 * packet.
1211 		 */
1212 		/* BUG--we should now recover some memory, probably by
1213 		 * reneging...
1214 		 */
1215 		error = -ENOMEM;
1216 		break;
1217 
1218 	case SCTP_DISPOSITION_DELETE_TCB:
1219 	case SCTP_DISPOSITION_ABORT:
1220 		/* This should now be a command. */
1221 		*asoc = NULL;
1222 		break;
1223 
1224 	case SCTP_DISPOSITION_CONSUME:
1225 		/*
1226 		 * We should no longer have much work to do here as the
1227 		 * real work has been done as explicit commands above.
1228 		 */
1229 		break;
1230 
1231 	case SCTP_DISPOSITION_VIOLATION:
1232 		net_err_ratelimited("protocol violation state %d chunkid %d\n",
1233 				    state, subtype.chunk);
1234 		break;
1235 
1236 	case SCTP_DISPOSITION_NOT_IMPL:
1237 		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
1238 			state, event_type, subtype.chunk);
1239 		break;
1240 
1241 	case SCTP_DISPOSITION_BUG:
1242 		pr_err("bug in state %d, event_type %d, event_id %d\n",
1243 		       state, event_type, subtype.chunk);
1244 		BUG();
1245 		break;
1246 
1247 	default:
1248 		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
1249 		       status, state, event_type, subtype.chunk);
1250 		BUG();
1251 		break;
1252 	}
1253 
1254 bail:
1255 	return error;
1256 }
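
/*
 * Note that the queued commands are interpreted before the disposition is
 * examined (an interpreter error bails out early); only then is the status
 * used to decide whether to drop the association pointer
 * (DELETE_TCB/ABORT), report -ENOMEM, or flag a protocol violation or a
 * state machine bug.
 */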
1257 
1258 /********************************************************************
1259  * 2nd Level Abstractions
1260  ********************************************************************/
1261 
1262 /* This is the side-effect interpreter.  */
1263 static int sctp_cmd_interpreter(enum sctp_event_type event_type,
1264 				union sctp_subtype subtype,
1265 				enum sctp_state state,
1266 				struct sctp_endpoint *ep,
1267 				struct sctp_association *asoc,
1268 				void *event_arg,
1269 				enum sctp_disposition status,
1270 				struct sctp_cmd_seq *commands,
1271 				gfp_t gfp)
1272 {
1273 	struct sctp_sock *sp = sctp_sk(ep->base.sk);
1274 	struct sctp_chunk *chunk = NULL, *new_obj;
1275 	struct sctp_packet *packet;
1276 	struct sctp_sackhdr sackh;
1277 	struct timer_list *timer;
1278 	struct sctp_transport *t;
1279 	unsigned long timeout;
1280 	struct sctp_cmd *cmd;
1281 	int local_cork = 0;
1282 	int error = 0;
1283 	int force;
1284 
1285 	if (SCTP_EVENT_T_TIMEOUT != event_type)
1286 		chunk = event_arg;
1287 
1288 	/* Note:  This whole file is a huge candidate for rework.
1289 	 * For example, each command could either have its own handler, so
1290 	 * the loop would look like:
1291 	 *     while (cmds)
1292 	 *         cmd->handle(x, y, z)
1293 	 * --jgrimm
1294 	 */
1295 	while (NULL != (cmd = sctp_next_cmd(commands))) {
1296 		switch (cmd->verb) {
1297 		case SCTP_CMD_NOP:
1298 			/* Do nothing. */
1299 			break;
1300 
1301 		case SCTP_CMD_NEW_ASOC:
1302 			/* Register a new association.  */
1303 			if (local_cork) {
1304 				sctp_outq_uncork(&asoc->outqueue, gfp);
1305 				local_cork = 0;
1306 			}
1307 
1308 			/* Register with the endpoint.  */
1309 			asoc = cmd->obj.asoc;
1310 			BUG_ON(asoc->peer.primary_path == NULL);
1311 			sctp_endpoint_add_asoc(ep, asoc);
1312 			break;
1313 
1314 		case SCTP_CMD_PURGE_OUTQUEUE:
1315 		       sctp_outq_teardown(&asoc->outqueue);
1316 		       break;
1317 
1318 		case SCTP_CMD_DELETE_TCB:
1319 			if (local_cork) {
1320 				sctp_outq_uncork(&asoc->outqueue, gfp);
1321 				local_cork = 0;
1322 			}
1323 			/* Delete the current association.  */
1324 			sctp_cmd_delete_tcb(commands, asoc);
1325 			asoc = NULL;
1326 			break;
1327 
1328 		case SCTP_CMD_NEW_STATE:
1329 			/* Enter a new state.  */
1330 			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1331 			break;
1332 
1333 		case SCTP_CMD_REPORT_TSN:
1334 			/* Record the arrival of a TSN.  */
1335 			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1336 						 cmd->obj.u32, NULL);
1337 			break;
1338 
1339 		case SCTP_CMD_REPORT_FWDTSN:
1340 			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
1341 			break;
1342 
1343 		case SCTP_CMD_PROCESS_FWDTSN:
1344 			asoc->stream.si->handle_ftsn(&asoc->ulpq,
1345 						     cmd->obj.chunk);
1346 			break;
1347 
1348 		case SCTP_CMD_GEN_SACK:
1349 			/* Generate a Selective ACK.
1350 			 * The argument tells us whether to just count
1351 			 * the packet and MAYBE generate a SACK, or
1352 			 * force a SACK out.
1353 			 */
1354 			force = cmd->obj.i32;
1355 			error = sctp_gen_sack(asoc, force, commands);
1356 			break;
1357 
1358 		case SCTP_CMD_PROCESS_SACK:
1359 			/* Process an inbound SACK.  */
1360 			error = sctp_cmd_process_sack(commands, asoc,
1361 						      cmd->obj.chunk);
1362 			break;
1363 
1364 		case SCTP_CMD_GEN_INIT_ACK:
1365 			/* Generate an INIT ACK chunk.  */
1366 			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1367 						     0);
1368 			if (!new_obj) {
1369 				error = -ENOMEM;
1370 				break;
1371 			}
1372 
1373 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1374 					SCTP_CHUNK(new_obj));
1375 			break;
1376 
1377 		case SCTP_CMD_PEER_INIT:
1378 			/* Process a unified INIT from the peer.
1379 			 * Note: Only used during INIT-ACK processing.  If
1380 			 * there is an error, just return to the outer
1381 			 * layer which will bail.
1382 			 */
1383 			error = sctp_cmd_process_init(commands, asoc, chunk,
1384 						      cmd->obj.init, gfp);
1385 			break;
1386 
1387 		case SCTP_CMD_GEN_COOKIE_ECHO:
1388 			/* Generate a COOKIE ECHO chunk.  */
1389 			new_obj = sctp_make_cookie_echo(asoc, chunk);
1390 			if (!new_obj) {
1391 				if (cmd->obj.chunk)
1392 					sctp_chunk_free(cmd->obj.chunk);
1393 				error = -ENOMEM;
1394 				break;
1395 			}
1396 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1397 					SCTP_CHUNK(new_obj));
1398 
1399 			/* If there is an ERROR chunk to be sent along with
1400 			 * the COOKIE_ECHO, send it, too.
1401 			 */
1402 			if (cmd->obj.chunk)
1403 				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1404 						SCTP_CHUNK(cmd->obj.chunk));
1405 
1406 			if (new_obj->transport) {
1407 				new_obj->transport->init_sent_count++;
1408 				asoc->init_last_sent_to = new_obj->transport;
1409 			}
1410 
1411 			/* FIXME - Eventually come up with a cleaner way to
1412 			 * enable COOKIE-ECHO + DATA bundling during
1413 			 * multihoming stale cookie scenarios, the following
1414 			 * command plays with asoc->peer.retran_path to
1415 			 * avoid the problem of sending the COOKIE-ECHO and
1416 			 * DATA in different paths, which could result
1417 			 * in the association being ABORTed if the DATA chunk
1418 			 * is processed first by the server.  Checking the
1419 			 * init error counter simply causes this command
1420 			 * to be executed only during failed attempts of
1421 			 * association establishment.
1422 			 */
1423 			if ((asoc->peer.retran_path !=
1424 			     asoc->peer.primary_path) &&
1425 			    (asoc->init_err_counter > 0)) {
1426 				sctp_add_cmd_sf(commands,
1427 						SCTP_CMD_FORCE_PRIM_RETRAN,
1428 						SCTP_NULL());
1429 			}
1430 
1431 			break;
1432 
1433 		case SCTP_CMD_GEN_SHUTDOWN:
1434 			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
1435 			 * Reset error counts.
1436 			 */
1437 			asoc->overall_error_count = 0;
1438 
1439 			/* Generate a SHUTDOWN chunk.  */
1440 			new_obj = sctp_make_shutdown(asoc, chunk);
1441 			if (!new_obj) {
1442 				error = -ENOMEM;
1443 				break;
1444 			}
1445 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1446 					SCTP_CHUNK(new_obj));
1447 			break;
1448 
1449 		case SCTP_CMD_CHUNK_ULP:
1450 			/* Send a chunk to the sockets layer.  */
1451 			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
1452 				 __func__, cmd->obj.chunk, &asoc->ulpq);
1453 
1454 			asoc->stream.si->ulpevent_data(&asoc->ulpq,
1455 						       cmd->obj.chunk,
1456 						       GFP_ATOMIC);
1457 			break;
1458 
1459 		case SCTP_CMD_EVENT_ULP:
1460 			/* Send a notification to the sockets layer.  */
1461 			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
1462 				 __func__, cmd->obj.ulpevent, &asoc->ulpq);
1463 
1464 			asoc->stream.si->enqueue_event(&asoc->ulpq,
1465 						       cmd->obj.ulpevent);
1466 			break;
1467 
1468 		case SCTP_CMD_REPLY:
1469 			/* If a caller has not already corked, do cork. */
1470 			if (!asoc->outqueue.cork) {
1471 				sctp_outq_cork(&asoc->outqueue);
1472 				local_cork = 1;
1473 			}
1474 			/* Send a chunk to our peer.  */
1475 			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
1476 			break;
1477 
1478 		case SCTP_CMD_SEND_PKT:
1479 			/* Send a full packet to our peer.  */
1480 			packet = cmd->obj.packet;
1481 			sctp_packet_transmit(packet, gfp);
1482 			sctp_ootb_pkt_free(packet);
1483 			break;
1484 
1485 		case SCTP_CMD_T1_RETRAN:
1486 			/* Mark a transport for retransmission.  */
1487 			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1488 					SCTP_RTXR_T1_RTX);
1489 			break;
1490 
1491 		case SCTP_CMD_RETRAN:
1492 			/* Mark a transport for retransmission.  */
1493 			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1494 					SCTP_RTXR_T3_RTX);
1495 			break;
1496 
1497 		case SCTP_CMD_ECN_CE:
1498 			/* Do delayed CE processing.   */
1499 			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1500 			break;
1501 
1502 		case SCTP_CMD_ECN_ECNE:
1503 			/* Do delayed ECNE processing. */
1504 			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1505 							chunk);
1506 			if (new_obj)
1507 				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1508 						SCTP_CHUNK(new_obj));
1509 			break;
1510 
1511 		case SCTP_CMD_ECN_CWR:
1512 			/* Do delayed CWR processing.  */
1513 			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1514 			break;
1515 
1516 		case SCTP_CMD_SETUP_T2:
1517 			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1518 			break;
1519 
1520 		case SCTP_CMD_TIMER_START_ONCE:
1521 			timer = &asoc->timers[cmd->obj.to];
1522 
1523 			if (timer_pending(timer))
1524 				break;
1525 			fallthrough;
1526 
1527 		case SCTP_CMD_TIMER_START:
1528 			timer = &asoc->timers[cmd->obj.to];
1529 			timeout = asoc->timeouts[cmd->obj.to];
1530 			BUG_ON(!timeout);
1531 
1532 			/*
1533 			 * SCTP has a hard time with timer starts.  Because we process
1534 			 * timer starts as side effects, it can be hard to tell if we
1535 			 * have already started a timer or not, which leads to BUG
1536 			 * halts when we call add_timer.  So instead of starting the
1537 			 * timer unconditionally, take a reference only if it is not
1538 			 * already pending and reduce it to the shorter of the two
1539 			 * expiration times.
			 */
1540 			if (!timer_pending(timer))
1541 				sctp_association_hold(asoc);
1542 			timer_reduce(timer, jiffies + timeout);
1543 			break;
1544 
1545 		case SCTP_CMD_TIMER_RESTART:
1546 			timer = &asoc->timers[cmd->obj.to];
1547 			timeout = asoc->timeouts[cmd->obj.to];
1548 			if (!mod_timer(timer, jiffies + timeout))
1549 				sctp_association_hold(asoc);
1550 			break;
1551 
1552 		case SCTP_CMD_TIMER_STOP:
1553 			timer = &asoc->timers[cmd->obj.to];
1554 			if (del_timer(timer))
1555 				sctp_association_put(asoc);
1556 			break;
1557 
1558 		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1559 			chunk = cmd->obj.chunk;
1560 			t = sctp_assoc_choose_alter_transport(asoc,
1561 						asoc->init_last_sent_to);
1562 			asoc->init_last_sent_to = t;
1563 			chunk->transport = t;
1564 			t->init_sent_count++;
1565 			/* Set the new transport as primary */
1566 			sctp_assoc_set_primary(asoc, t);
1567 			break;
1568 
1569 		case SCTP_CMD_INIT_RESTART:
1570 			/* Do the needed accounting and updates
1571 			 * associated with restarting an initialization
1572 			 * timer. Only multiply the timeout by two if
1573 			 * all transports have been tried at the current
1574 			 * timeout.
1575 			 */
1576 			sctp_cmd_t1_timer_update(asoc,
1577 						SCTP_EVENT_TIMEOUT_T1_INIT,
1578 						"INIT");
1579 
1580 			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
1581 					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
1582 			break;
1583 
1584 		case SCTP_CMD_COOKIEECHO_RESTART:
1585 			/* Do the needed accounting and updates
1586 			 * associated with restarting an initialization
1587 			 * timer. Only multiply the timeout by two if
1588 			 * all transports have been tried at the current
1589 			 * timeout.
1590 			 */
1591 			sctp_cmd_t1_timer_update(asoc,
1592 						SCTP_EVENT_TIMEOUT_T1_COOKIE,
1593 						"COOKIE");
1594 
1595 			/* If we've sent any data bundled with
1596 			 * COOKIE-ECHO we need to resend.
1597 			 */
1598 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
1599 					transports) {
1600 				sctp_retransmit_mark(&asoc->outqueue, t,
1601 					    SCTP_RTXR_T1_RTX);
1602 			}
1603 
1604 			sctp_add_cmd_sf(commands,
1605 					SCTP_CMD_TIMER_RESTART,
1606 					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1607 			break;
1608 
1609 		case SCTP_CMD_INIT_FAILED:
1610 			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
1611 			break;
1612 
1613 		case SCTP_CMD_ASSOC_FAILED:
1614 			sctp_cmd_assoc_failed(commands, asoc, event_type,
1615 					      subtype, chunk, cmd->obj.u16);
1616 			break;
1617 
1618 		case SCTP_CMD_INIT_COUNTER_INC:
1619 			asoc->init_err_counter++;
1620 			break;
1621 
1622 		case SCTP_CMD_INIT_COUNTER_RESET:
1623 			asoc->init_err_counter = 0;
1624 			asoc->init_cycle = 0;
1625 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
1626 					    transports) {
1627 				t->init_sent_count = 0;
1628 			}
1629 			break;
1630 
1631 		case SCTP_CMD_REPORT_DUP:
1632 			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1633 					     cmd->obj.u32);
1634 			break;
1635 
1636 		case SCTP_CMD_REPORT_BAD_TAG:
1637 			pr_debug("%s: vtag mismatch!\n", __func__);
1638 			break;
1639 
1640 		case SCTP_CMD_STRIKE:
1641 			/* Mark one strike against a transport.  */
1642 			sctp_do_8_2_transport_strike(commands, asoc,
1643 						    cmd->obj.transport, 0);
1644 			break;
1645 
1646 		case SCTP_CMD_TRANSPORT_IDLE:
1647 			t = cmd->obj.transport;
1648 			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
1649 			break;
1650 
1651 		case SCTP_CMD_TRANSPORT_HB_SENT:
1652 			t = cmd->obj.transport;
1653 			sctp_do_8_2_transport_strike(commands, asoc,
1654 						     t, 1);
1655 			t->hb_sent = 1;
1656 			break;
1657 
1658 		case SCTP_CMD_TRANSPORT_ON:
1659 			t = cmd->obj.transport;
1660 			sctp_cmd_transport_on(commands, asoc, t, chunk);
1661 			break;
1662 
1663 		case SCTP_CMD_HB_TIMERS_START:
1664 			sctp_cmd_hb_timers_start(commands, asoc);
1665 			break;
1666 
1667 		case SCTP_CMD_HB_TIMER_UPDATE:
1668 			t = cmd->obj.transport;
1669 			sctp_transport_reset_hb_timer(t);
1670 			break;
1671 
1672 		case SCTP_CMD_HB_TIMERS_STOP:
1673 			sctp_cmd_hb_timers_stop(commands, asoc);
1674 			break;
1675 
1676 		case SCTP_CMD_PROBE_TIMER_UPDATE:
1677 			t = cmd->obj.transport;
1678 			sctp_transport_reset_probe_timer(t);
1679 			break;
1680 
1681 		case SCTP_CMD_REPORT_ERROR:
1682 			error = cmd->obj.error;
1683 			break;
1684 
1685 		case SCTP_CMD_PROCESS_CTSN:
1686 			/* Dummy up a SACK for processing. */
1687 			sackh.cum_tsn_ack = cmd->obj.be32;
1688 			sackh.a_rwnd = htonl(asoc->peer.rwnd +
1689 					     asoc->outqueue.outstanding_bytes);
1690 			sackh.num_gap_ack_blocks = 0;
1691 			sackh.num_dup_tsns = 0;
1692 			chunk->subh.sack_hdr = &sackh;
1693 			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
1694 					SCTP_CHUNK(chunk));
1695 			break;
1696 
1697 		case SCTP_CMD_DISCARD_PACKET:
1698 			/* We need to discard the whole packet.
1699 			 * Uncork the queue since there might be
1700 			 * responses pending.
1701 			 */
1702 			chunk->pdiscard = 1;
1703 			if (asoc) {
1704 				sctp_outq_uncork(&asoc->outqueue, gfp);
1705 				local_cork = 0;
1706 			}
1707 			break;
1708 
1709 		case SCTP_CMD_RTO_PENDING:
1710 			t = cmd->obj.transport;
1711 			t->rto_pending = 1;
1712 			break;
1713 
1714 		case SCTP_CMD_PART_DELIVER:
1715 			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
1716 			break;
1717 
1718 		case SCTP_CMD_RENEGE:
1719 			asoc->stream.si->renege_events(&asoc->ulpq,
1720 						       cmd->obj.chunk,
1721 						       GFP_ATOMIC);
1722 			break;
1723 
1724 		case SCTP_CMD_SETUP_T4:
1725 			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1726 			break;
1727 
1728 		case SCTP_CMD_PROCESS_OPERR:
1729 			sctp_cmd_process_operr(commands, asoc, chunk);
1730 			break;
1731 		case SCTP_CMD_CLEAR_INIT_TAG:
1732 			asoc->peer.i.init_tag = 0;
1733 			break;
1734 		case SCTP_CMD_DEL_NON_PRIMARY:
1735 			sctp_cmd_del_non_primary(asoc);
1736 			break;
1737 		case SCTP_CMD_T3_RTX_TIMERS_STOP:
1738 			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1739 			break;
1740 		case SCTP_CMD_FORCE_PRIM_RETRAN:
1741 			t = asoc->peer.retran_path;
1742 			asoc->peer.retran_path = asoc->peer.primary_path;
1743 			sctp_outq_uncork(&asoc->outqueue, gfp);
1744 			local_cork = 0;
1745 			asoc->peer.retran_path = t;
1746 			break;
1747 		case SCTP_CMD_SET_SK_ERR:
1748 			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1749 			break;
1750 		case SCTP_CMD_ASSOC_CHANGE:
1751 			sctp_cmd_assoc_change(commands, asoc,
1752 					      cmd->obj.u8);
1753 			break;
1754 		case SCTP_CMD_ADAPTATION_IND:
1755 			sctp_cmd_adaptation_ind(commands, asoc);
1756 			break;
1757 		case SCTP_CMD_PEER_NO_AUTH:
1758 			sctp_cmd_peer_no_auth(commands, asoc);
1759 			break;
1760 
1761 		case SCTP_CMD_ASSOC_SHKEY:
1762 			error = sctp_auth_asoc_init_active_key(asoc,
1763 						GFP_ATOMIC);
1764 			break;
1765 		case SCTP_CMD_UPDATE_INITTAG:
1766 			asoc->peer.i.init_tag = cmd->obj.u32;
1767 			break;
1768 		case SCTP_CMD_SEND_MSG:
1769 			if (!asoc->outqueue.cork) {
1770 				sctp_outq_cork(&asoc->outqueue);
1771 				local_cork = 1;
1772 			}
1773 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
1774 			break;
1775 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
1776 			sctp_asconf_queue_teardown(asoc);
1777 			break;
1778 
1779 		case SCTP_CMD_SET_ASOC:
1780 			if (asoc && local_cork) {
1781 				sctp_outq_uncork(&asoc->outqueue, gfp);
1782 				local_cork = 0;
1783 			}
1784 			asoc = cmd->obj.asoc;
1785 			break;
1786 
1787 		default:
1788 			pr_warn("Impossible command: %u\n",
1789 				cmd->verb);
1790 			break;
1791 		}
1792 
1793 		if (error) {
1794 			cmd = sctp_next_cmd(commands);
1795 			while (cmd) {
1796 				if (cmd->verb == SCTP_CMD_REPLY)
1797 					sctp_chunk_free(cmd->obj.chunk);
1798 				cmd = sctp_next_cmd(commands);
1799 			}
1800 			break;
1801 		}
1802 	}
1803 
1804 	/* If this is in response to a received chunk, wait until
1805 	 * we are done with the packet to open the queue so that we don't
1806 	 * send multiple packets in response to a single request.
1807 	 */
1808 	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1809 		if (chunk->end_of_packet || chunk->singleton)
1810 			sctp_outq_uncork(&asoc->outqueue, gfp);
1811 	} else if (local_cork)
1812 		sctp_outq_uncork(&asoc->outqueue, gfp);
1813 
1814 	if (sp->data_ready_signalled)
1815 		sp->data_ready_signalled = 0;
1816 
1817 	return error;
1818 }
1819