xref: /openbmc/linux/net/dccp/input.c (revision ed735ccbefaf7e5e3ef61418f7e209b8c59308a7)
1 /*
2  *  net/dccp/input.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *	This program is free software; you can redistribute it and/or
8  *	modify it under the terms of the GNU General Public License
9  *	as published by the Free Software Foundation; either version
10  *	2 of the License, or (at your option) any later version.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/dccp.h>
15 #include <linux/skbuff.h>
16 
17 #include <net/sock.h>
18 
19 #include "ccid.h"
20 #include "dccp.h"
21 
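/*
 * Queue the skb as the DCCP equivalent of a received TCP FIN: mark the
 * receive side as shut down, flag the socket as DONE and put the packet
 * on sk_receive_queue, so that a reader blocked in dccp_recvmsg() is
 * woken up via sk_data_ready() and sees end-of-stream.
 */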
22 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
23 {
24 	sk->sk_shutdown |= RCV_SHUTDOWN;
25 	sock_set_flag(sk, SOCK_DONE);
26 	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
27 	__skb_queue_tail(&sk->sk_receive_queue, skb);
28 	skb_set_owner_r(skb, sk);
29 	sk->sk_data_ready(sk, 0);
30 }
31 
32 static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
33 {
34 	dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
35 	dccp_fin(sk, skb);
36 	dccp_set_state(sk, DCCP_CLOSED);
37 	sk_wake_async(sk, 1, POLL_HUP);
38 }
39 
40 static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
41 {
42 	/*
43 	 *   Step 7: Check for unexpected packet types
44 	 *      If (S.is_server and P.type == CloseReq)
45 	 *	  Send Sync packet acknowledging P.seqno
46 	 *	  Drop packet and return
47 	 */
48 	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
49 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
50 		return;
51 	}
52 
53 	dccp_set_state(sk, DCCP_CLOSING);
54 	dccp_send_close(sk, 0);
55 }
56 
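/*
 * Called for every packet that carries an Acknowledgement Number.  When
 * Ack Vectors are in use this lets the ack vector code check whether the
 * peer has acknowledged a previously sent Ack Vector, so that the records
 * covered by it can be discarded (see dccp_ackpkts_check_rcv_ackno()).
 */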
57 static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
58 {
59 	struct dccp_sock *dp = dccp_sk(sk);
60 
61 	if (dp->dccps_options.dccpo_send_ack_vector)
62 		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
63 					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
64 }
65 
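/*
 * The sequence number checks below use the 48-bit circular helpers from
 * dccp.h.  Roughly (see that header for the exact definitions):
 *
 *	before48(a, b)          - a is circularly earlier than b (mod 2^48)
 *	between48(x, low, high) - low <= x <= high in circular 48-bit space
 *	dccp_inc_seqno(&s)      - advance s by one, wrapping at 2^48
 *
 * dccp_update_gsr() records the greatest sequence number received (S.GSR)
 * and recomputes the valid sequence window [S.SWL, S.SWH] around it.
 */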
66 static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
67 {
68 	const struct dccp_hdr *dh = dccp_hdr(skb);
69 	struct dccp_sock *dp = dccp_sk(sk);
70 	u64 lswl, lawl;
71 
72 	/*
73 	 *   Step 5: Prepare sequence numbers for Sync
74 	 *     If P.type == Sync or P.type == SyncAck,
75 	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
76 	 *	     / * P is valid, so update sequence number variables
77 	 *		 accordingly.  After this update, P will pass the tests
78 	 *		 in Step 6.  A SyncAck is generated if necessary in
79 	 *		 Step 15 * /
80 	 *	     Update S.GSR, S.SWL, S.SWH
81 	 *	  Otherwise,
82 	 *	     Drop packet and return
83 	 */
84 	if (dh->dccph_type == DCCP_PKT_SYNC ||
85 	    dh->dccph_type == DCCP_PKT_SYNCACK) {
86 		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
87 			      dp->dccps_awl, dp->dccps_awh) &&
88 		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
89 			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
90 		else
91 			return -1;
92 	}
93 
94 	/*
95 	 *   Step 6: Check sequence numbers
96 	 *      Let LSWL = S.SWL and LAWL = S.AWL
97 	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
98 	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
99 	 *      If LSWL <= P.seqno <= S.SWH
100 	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
101 	 *	  Update S.GSR, S.SWL, S.SWH
102 	 *	  If P.type != Sync,
103 	 *	     Update S.GAR
104 	 *      Otherwise,
105 	 *	  Send Sync packet acknowledging P.seqno
106 	 *	  Drop packet and return
107 	 */
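	/*
	 * Illustrative example (assuming the RFC window formulas and the
	 * default Sequence Window W = 100): with S.GSR = 2000 we get
	 * S.SWL = 2000 + 1 - W/4 = 1976 and S.SWH = 2000 + 3*W/4 = 2075,
	 * so a Data packet with P.seqno = 2001 passes the between48()
	 * check below, while P.seqno = 1975 or 2100 takes the
	 * Sync-and-drop path.
	 */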
108 	lswl = dp->dccps_swl;
109 	lawl = dp->dccps_awl;
110 
111 	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
112 	    dh->dccph_type == DCCP_PKT_CLOSE ||
113 	    dh->dccph_type == DCCP_PKT_RESET) {
114 		lswl = dp->dccps_gsr;
115 		dccp_inc_seqno(&lswl);
116 		lawl = dp->dccps_gar;
117 	}
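	/*
	 * The window for connection-teardown packet types is deliberately
	 * tightened: a CloseReq, Close or Reset only passes Step 6 if it is
	 * sequentially newer than anything received so far (P.seqno > S.GSR)
	 * and carries an acknowledgement at least as recent as the newest
	 * one already seen, so stale duplicates cannot tear the connection
	 * down.
	 */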
118 
119 	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
120 	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
121 	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
122 		       lawl, dp->dccps_awh))) {
123 		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
124 
125 		if (dh->dccph_type != DCCP_PKT_SYNC &&
126 		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
127 		     DCCP_PKT_WITHOUT_ACK_SEQ))
128 			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
129 	} else {
130 		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
131 					    "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
132 					    "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu)), "
133 					    "sending SYNC...\n",
134 			       dccp_packet_name(dh->dccph_type),
135 			       (unsigned long long) lswl,
136 			       (unsigned long long)
137 			       DCCP_SKB_CB(skb)->dccpd_seq,
138 			       (unsigned long long) dp->dccps_swh,
139 			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
140 			        DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
141 			       (unsigned long long) lawl,
142 			       (unsigned long long)
143 			       DCCP_SKB_CB(skb)->dccpd_ack_seq,
144 			       (unsigned long long) dp->dccps_awh);
145 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
146 		return -1;
147 	}
148 
149 	return 0;
150 }
151 
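/*
 * Fast path for packets arriving on a socket in the OPEN (established)
 * state, called from dccp_v4_do_rcv().  The skb is always consumed here:
 * Data, DataAck, Reset and Close are queued on sk_receive_queue (the
 * latter two via dccp_fin(), as the equivalent of a TCP FIN), everything
 * else is freed.  Always returns 0.
 */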
152 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
153 			 const struct dccp_hdr *dh, const unsigned len)
154 {
155 	struct dccp_sock *dp = dccp_sk(sk);
156 
157 	if (dccp_check_seqno(sk, skb))
158 		goto discard;
159 
160 	if (dccp_parse_options(sk, skb))
161 		goto discard;
162 
163 	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
164 		dccp_event_ack_recv(sk, skb);
165 
166 	/*
167 	 * FIXME: check ECN to see if we should use
168 	 * DCCP_ACKPKTS_STATE_ECN_MARKED
169 	 */
170 	if (dp->dccps_options.dccpo_send_ack_vector) {
171 		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
172 
173 		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
174 				     DCCP_SKB_CB(skb)->dccpd_seq,
175 				     DCCP_ACKPKTS_STATE_RECEIVED)) {
176 			LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable "
177 						    "packets buffer full!\n");
178 			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
179 			inet_csk_schedule_ack(sk);
180 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
181 						  TCP_DELACK_MIN,
182 						  DCCP_RTO_MAX);
183 			goto discard;
184 		}
185 
186 		/*
187 		 * FIXME: this activation is probably wrong, have to study more
188 		 * TCP delack machinery and how it fits into DCCP draft, but
189 		 * for now it kinda "works" 8)
190 		 */
191 		if (!inet_csk_ack_scheduled(sk)) {
192 			inet_csk_schedule_ack(sk);
193 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
194 						  DCCP_RTO_MAX);
195 		}
196 	}
197 
198 	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
199 	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
200 
201 	switch (dccp_hdr(skb)->dccph_type) {
202 	case DCCP_PKT_DATAACK:
203 	case DCCP_PKT_DATA:
204 		/*
205 		 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
206 		 * option if it is.
207 		 */
208 		__skb_pull(skb, dh->dccph_doff * 4);
209 		__skb_queue_tail(&sk->sk_receive_queue, skb);
210 		skb_set_owner_r(skb, sk);
211 		sk->sk_data_ready(sk, 0);
212 		return 0;
213 	case DCCP_PKT_ACK:
214 		goto discard;
215 	case DCCP_PKT_RESET:
216 		/*
217 		 *  Step 9: Process Reset
218 		 *	If P.type == Reset,
219 		 *		Tear down connection
220 		 *		S.state := TIMEWAIT
221 		 *		Set TIMEWAIT timer
222 		 *		Drop packet and return
223 		*/
224 		dccp_fin(sk, skb);
225 		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
226 		return 0;
227 	case DCCP_PKT_CLOSEREQ:
228 		dccp_rcv_closereq(sk, skb);
229 		goto discard;
230 	case DCCP_PKT_CLOSE:
231 		dccp_rcv_close(sk, skb);
232 		return 0;
233 	case DCCP_PKT_REQUEST:
234 		/* Step 7: Check for unexpected packet types
235 		 *   or (S.is_server and P.type == Response)
236 		 *   or (S.is_client and P.type == Request)
237 		 *   or (S.state >= OPEN and P.type == Request
238 		 *	and P.seqno >= S.OSR)
239 		 *   or (S.state >= OPEN and P.type == Response
240 		 *	and P.seqno >= S.OSR)
241 		 *   or (S.state == RESPOND and P.type == Data),
242 		 *  Send Sync packet acknowledging P.seqno
243 		 *  Drop packet and return
244 		 */
245 		if (dp->dccps_role != DCCP_ROLE_LISTEN)
246 			goto send_sync;
247 		goto check_seq;
248 	case DCCP_PKT_RESPONSE:
249 		if (dp->dccps_role != DCCP_ROLE_CLIENT)
250 			goto send_sync;
251 check_seq:
252 		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
253 send_sync:
254 			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
255 				       DCCP_PKT_SYNC);
256 		}
257 		break;
258 	case DCCP_PKT_SYNC:
259 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
260 			       DCCP_PKT_SYNCACK);
261 		/*
262 		 * From the draft:
263 		 *
264 		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
265 		 * MAY have non-zero-length application data areas, whose
266 		 * contents receivers MUST ignore.
267 		 */
268 		goto discard;
269 	}
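	/*
	 * Whatever breaks out of or is not matched by the switch above
	 * (Request/Response after the Step 7 handling, a SyncAck, or an
	 * unknown packet type) ends up here: it is counted as an input
	 * error and then dropped.
	 */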
270 
271 	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
272 discard:
273 	__kfree_skb(skb);
274 	return 0;
275 }
276 
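/*
 * Handles packets received in the REQUESTING (client, Request sent)
 * state.  Return convention, as interpreted by dccp_rcv_state_process():
 *    0 - the skb was consumed here
 *   -1 - an Ack was sent; the caller frees the skb and returns success
 *    1 - invalid packet; the caller will answer with a Reset
 */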
277 static int dccp_rcv_request_sent_state_process(struct sock *sk,
278 					       struct sk_buff *skb,
279 					       const struct dccp_hdr *dh,
280 					       const unsigned len)
281 {
282 	/*
283 	 *  Step 4: Prepare sequence numbers in REQUEST
284 	 *     If S.state == REQUEST,
285 	 *	  If (P.type == Response or P.type == Reset)
286 	 *		and S.AWL <= P.ackno <= S.AWH,
287 	 *	     / * Set sequence number variables corresponding to the
288 	 *		other endpoint, so P will pass the tests in Step 6 * /
289 	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
290 	 *	     / * Response processing continues in Step 10; Reset
291 	 *		processing continues in Step 9 * /
292 	*/
293 	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
294 		const struct inet_connection_sock *icsk = inet_csk(sk);
295 		struct dccp_sock *dp = dccp_sk(sk);
296 
297 		/* Stop the REQUEST timer */
298 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
299 		BUG_TRAP(sk->sk_send_head != NULL);
300 		__kfree_skb(sk->sk_send_head);
301 		sk->sk_send_head = NULL;
302 
303 		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
304 			       dp->dccps_awl, dp->dccps_awh)) {
305 			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
306 				      "P.ackno=%llu, S.AWH=%llu\n",
307 				      (unsigned long long)dp->dccps_awl,
308 			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
309 				      (unsigned long long)dp->dccps_awh);
310 			goto out_invalid_packet;
311 		}
312 
313 		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
314 		dccp_update_gsr(sk, dp->dccps_isr);
315 		/*
316 		 * SWL and AWL are initially adjusted so that they are not less than
317 		 * the initial Sequence Numbers received and sent, respectively:
318 		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
319 		 *	AWL := max(GSS - W' + 1, ISS).
320 		 * These adjustments MUST be applied only at the beginning of the
321 		 * connection.
322 		 *
323 		 * AWL was adjusted in dccp_v4_connect -acme
324 		 */
325 		dccp_set_seqno(&dp->dccps_swl,
326 			       max48(dp->dccps_swl, dp->dccps_isr));
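		/*
		 * Worked example (assuming dccp_update_gsr() uses the RFC
		 * formula SWL = GSR + 1 - floor(W/4) and the default window
		 * W = 100): right after the dccp_update_gsr() call above we
		 * have GSR == ISR, so SWL == ISR - 24 and the max48() here
		 * clamps it back up to ISR, which is exactly the initial
		 * adjustment quoted in the comment above.
		 */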
327 
328 		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
329 		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
330 			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
331 			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
332 			/* FIXME: send appropriate RESET code */
333 			goto out_invalid_packet;
334 		}
335 
336 		dccp_sync_mss(sk, dp->dccps_pmtu_cookie);
337 
338 		/*
339 		 *    Step 10: Process REQUEST state (second part)
340 		 *       If S.state == REQUEST,
341 		 *	  / * If we get here, P is a valid Response from the
342 		 *	      server (see Step 4), and we should move to
343 		 *	      PARTOPEN state. PARTOPEN means send an Ack,
344 		 *	      don't send Data packets, retransmit Acks
345 		 *	      periodically, and always include any Init Cookie
346 		 *	      from the Response * /
347 		 *	  S.state := PARTOPEN
348 		 *	  Set PARTOPEN timer
349 		 * 	  Continue with S.state == PARTOPEN
350 		 *	  / * Step 12 will send the Ack completing the
351 		 *	      three-way handshake * /
352 		 */
353 		dccp_set_state(sk, DCCP_PARTOPEN);
354 
355 		/* Make sure socket is routed, for correct metrics. */
356 		inet_sk_rebuild_header(sk);
357 
358 		if (!sock_flag(sk, SOCK_DEAD)) {
359 			sk->sk_state_change(sk);
360 			sk_wake_async(sk, 0, POLL_OUT);
361 		}
362 
363 		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
364 		    icsk->icsk_accept_queue.rskq_defer_accept) {
365 			/* Save one ACK. Data will be ready after
366 			 * several ticks, if write_pending is set.
367 			 *
368 			 * It may be deleted, but with this feature tcpdumps
369 			 * look so _wonderfully_ clever, that I was not able
370 			 * to stand against the temptation 8)     --ANK
371 			 */
372 			/*
373 			 * OK, in DCCP we can as well do a similar trick; it's
374 			 * even in the draft, but there is no need for us to
375 			 * schedule an ack here, as dccp_sendmsg does this for
376 			 * us, also stated in the draft. -acme
377 			 */
378 			__kfree_skb(skb);
379 			return 0;
380 		}
381 		dccp_send_ack(sk);
382 		return -1;
383 	}
384 
385 out_invalid_packet:
386 	return 1; /* dccp_v4_do_rcv will send a reset, but...
387 		     FIXME: the reset code should be
388 			    DCCP_RESET_CODE_PACKET_ERROR */
389 }
390 
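/*
 * Handles packets received in the RESPOND and PARTOPEN states, most
 * importantly the Ack/DataAck that completes the handshake and moves the
 * socket to OPEN.  Returns non-zero when the skb was queued (by
 * dccp_rcv_established() for a DataAck), in which case the caller must
 * not free it.
 */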
391 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
392 						   struct sk_buff *skb,
393 						   const struct dccp_hdr *dh,
394 						   const unsigned len)
395 {
396 	int queued = 0;
397 
398 	switch (dh->dccph_type) {
399 	case DCCP_PKT_RESET:
400 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
401 		break;
402 	case DCCP_PKT_DATAACK:
403 	case DCCP_PKT_ACK:
404 		/*
405 		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
406 		 * here, but only if we haven't used the DELACK timer for
407 		 * something else, like sending a delayed ack for a TIMESTAMP
408 		 * echo, etc. For now we're not clearing it; sending an extra
409 		 * ACK when there is nothing else to do in DELACK is not a big
410 		 * deal after all.
411 		 */
412 
413 		/* Stop the PARTOPEN timer */
414 		if (sk->sk_state == DCCP_PARTOPEN)
415 			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
416 
417 		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
418 		dccp_set_state(sk, DCCP_OPEN);
419 
420 		if (dh->dccph_type == DCCP_PKT_DATAACK) {
421 			dccp_rcv_established(sk, skb, dh, len);
422 			queued = 1; /* packet was queued
423 				       (by dccp_rcv_established) */
424 		}
425 		break;
426 	}
427 
428 	return queued;
429 }
430 
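/*
 * State machine for packets arriving on sockets that are not in the OPEN
 * state (OPEN is handled by dccp_rcv_established() above).  Returns 0
 * when the packet was dealt with here, the skb having been either queued
 * or freed, and non-zero when the caller should answer with a Reset
 * (LISTEN, CLOSED and invalid-handshake cases).
 */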
431 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
432 			   struct dccp_hdr *dh, unsigned len)
433 {
434 	struct dccp_sock *dp = dccp_sk(sk);
435 	const int old_state = sk->sk_state;
436 	int queued = 0;
437 
438 	/*
439 	 *  Step 3: Process LISTEN state
440 	 *  	(Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
441 	 *
442 	 *     If S.state == LISTEN,
443 	 *	  If P.type == Request or P contains a valid Init Cookie
444 	 *	  	option,
445 	 *	     * Must scan the packet's options to check for an Init
446 	 *		Cookie.  Only the Init Cookie is processed here,
447 	 *		however; other options are processed in Step 8.  This
448 	 *		scan need only be performed if the endpoint uses Init
449 	 *		Cookies *
450 	 *	     * Generate a new socket and switch to that socket *
451 	 *	     Set S := new socket for this port pair
452 	 *	     S.state = RESPOND
453 	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
454 	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
455 	 *	     Continue with S.state == RESPOND
456 	 *	     * A Response packet will be generated in Step 11 *
457 	 *	  Otherwise,
458 	 *	     Generate Reset(No Connection) unless P.type == Reset
459 	 *	     Drop packet and return
460 	 *
461 	 * NOTE: the check for the packet types is done in
462 	 *	 dccp_rcv_state_process
463 	 */
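	/*
	 * Note: only the "P.type == Request" half of the test above is
	 * implemented below; no Init Cookie scan is performed, and the
	 * Reset(No Connection) is left to the caller, as noted further
	 * down.
	 */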
464 	if (sk->sk_state == DCCP_LISTEN) {
465 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
466 			if (dccp_v4_conn_request(sk, skb) < 0)
467 				return 1;
468 
469 			/* FIXME: do congestion control initialization */
470 			goto discard;
471 		}
472 		if (dh->dccph_type == DCCP_PKT_RESET)
473 			goto discard;
474 
475 		/* Caller (dccp_v4_do_rcv) will send Reset(No Connection) */
476 		return 1;
477 	}
478 
479 	if (sk->sk_state != DCCP_REQUESTING) {
480 		if (dccp_check_seqno(sk, skb))
481 			goto discard;
482 
483 		/*
484 		 * Step 8: Process options and mark acknowledgeable
485 		 */
486 		if (dccp_parse_options(sk, skb))
487 			goto discard;
488 
489 		if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
490 		    DCCP_PKT_WITHOUT_ACK_SEQ)
491 			dccp_event_ack_recv(sk, skb);
492 
493 		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
494 		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
495 
496 		/*
497 		 * FIXME: check ECN to see if we should use
498 		 * DCCP_ACKPKTS_STATE_ECN_MARKED
499 		 */
500 		if (dp->dccps_options.dccpo_send_ack_vector) {
501 			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
502 					     DCCP_SKB_CB(skb)->dccpd_seq,
503 					     DCCP_ACKPKTS_STATE_RECEIVED))
504 				goto discard;
505 			/*
506 			 * FIXME: this activation is probably wrong, have to
507 			 * study more TCP delack machinery and how it fits into
508 			 * DCCP draft, but for now it kinda "works" 8)
509 			 */
510 			if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
511 			     DCCP_MAX_SEQNO + 1) &&
512 			    !inet_csk_ack_scheduled(sk)) {
513 				inet_csk_schedule_ack(sk);
514 				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
515 							  TCP_DELACK_MIN,
516 							  DCCP_RTO_MAX);
517 			}
518 		}
519 	}
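	/*
	 * The REQUESTING state is skipped above because the sequence
	 * variables (S.ISR, S.GSR, S.SWL, S.SWH) are only initialized once
	 * a valid Response arrives; until then the packet is validated
	 * against S.AWL..S.AWH in dccp_rcv_request_sent_state_process()
	 * (Step 4) rather than by the Step 6/Step 8 processing above.
	 */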
520 
521 	/*
522 	 *  Step 9: Process Reset
523 	 *	If P.type == Reset,
524 	 *		Tear down connection
525 	 *		S.state := TIMEWAIT
526 	 *		Set TIMEWAIT timer
527 	 *		Drop packet and return
528 	*/
529 	if (dh->dccph_type == DCCP_PKT_RESET) {
530 		/*
531 		 * Queue the equivalent of TCP fin so that dccp_recvmsg
532 		 * exits the loop
533 		 */
534 		dccp_fin(sk, skb);
535 		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
536 		return 0;
537 		/*
538 		 *   Step 7: Check for unexpected packet types
539 		 *      If (S.is_server and P.type == CloseReq)
540 		 *	    or (S.is_server and P.type == Response)
541 		 *	    or (S.is_client and P.type == Request)
542 		 *	    or (S.state == RESPOND and P.type == Data),
543 		 *	  Send Sync packet acknowledging P.seqno
544 		 *	  Drop packet and return
545 		 */
546 	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
547 		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
548 		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
549 		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
550 		     dh->dccph_type == DCCP_PKT_REQUEST) ||
551 		    (sk->sk_state == DCCP_RESPOND &&
552 		     dh->dccph_type == DCCP_PKT_DATA)) {
553 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
554 			       DCCP_PKT_SYNC);
555 		goto discard;
556 	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
557 		dccp_rcv_closereq(sk, skb);
558 		goto discard;
559 	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
560 		dccp_rcv_close(sk, skb);
561 		return 0;
562 	}
563 
564 	switch (sk->sk_state) {
565 	case DCCP_CLOSED:
566 		return 1;
567 
568 	case DCCP_REQUESTING:
569 		/* FIXME: do congestion control initialization */
570 
571 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
572 		if (queued >= 0)
573 			return queued;
574 
575 		__kfree_skb(skb);
576 		return 0;
577 
578 	case DCCP_RESPOND:
579 	case DCCP_PARTOPEN:
580 		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
581 								 dh, len);
582 		break;
583 	}
584 
585 	if (dh->dccph_type == DCCP_PKT_ACK ||
586 	    dh->dccph_type == DCCP_PKT_DATAACK) {
587 		switch (old_state) {
588 		case DCCP_PARTOPEN:
589 			sk->sk_state_change(sk);
590 			sk_wake_async(sk, 0, POLL_OUT);
591 			break;
592 		}
593 	}
594 
595 	if (!queued) {
596 discard:
597 		__kfree_skb(skb);
598 	}
599 	return 0;
600 }
601