xref: /openbmc/linux/net/dccp/input.c (revision 03ace394ac9bcad38043a381ae5f4860b9c9fa1c)
1 /*
2  *  net/dccp/input.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *	This program is free software; you can redistribute it and/or
8  *	modify it under the terms of the GNU General Public License
9  *	as published by the Free Software Foundation; either version
10  *	2 of the License, or (at your option) any later version.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/dccp.h>
15 #include <linux/skbuff.h>
16 
17 #include <net/sock.h>
18 
19 #include "ccid.h"
20 #include "dccp.h"
21 
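/*
 * dccp_fin  -  Queue the DCCP equivalent of a TCP FIN
 *
 * Marks the receive side of the socket as shut down and the socket as DONE,
 * strips the DCCP header and queues the skb on sk_receive_queue so that a
 * sleeping dccp_recvmsg() wakes up and sees end-of-data.
 */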
22 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
23 {
24 	sk->sk_shutdown |= RCV_SHUTDOWN;
25 	sock_set_flag(sk, SOCK_DONE);
26 	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
27 	__skb_queue_tail(&sk->sk_receive_queue, skb);
28 	skb_set_owner_r(skb, sk);
29 	sk->sk_data_ready(sk, 0);
30 }
31 
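/*
 * Handle a received DCCP-Close: in the OPEN and PARTOPEN states answer with
 * a Reset carrying code "Closed", queue the fin equivalent for the reader
 * and move the socket to CLOSED.  In all other states the Close is ignored
 * here.
 */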
32 static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
33 {
34 	switch (sk->sk_state) {
35 	case DCCP_PARTOPEN:
36 	case DCCP_OPEN:
37 		dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
38 		dccp_fin(sk, skb);
39 		dccp_set_state(sk, DCCP_CLOSED);
40 		break;
41 	}
42 }
43 
44 static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
45 {
46 	/*
47 	 *   Step 7: Check for unexpected packet types
48 	 *      If (S.is_server and P.type == CloseReq)
49 	 *	  Send Sync packet acknowledging P.seqno
50 	 *	  Drop packet and return
51 	 */
52 	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
53 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
54 		return;
55 	}
56 
57 	switch (sk->sk_state) {
58 	case DCCP_PARTOPEN:
59 	case DCCP_OPEN:
60 		dccp_set_state(sk, DCCP_CLOSING);
61 		dccp_send_close(sk);
62 		break;
63 	}
64 }
65 
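/*
 * The packet carried an Acknowledgement Number: when Ack Vectors are in use,
 * pass it on to the ack-vector bookkeeping so that state the peer has now
 * acknowledged can be discarded.
 */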
66 static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
67 {
68 	struct dccp_sock *dp = dccp_sk(sk);
69 
70 	if (dp->dccps_options.dccpo_send_ack_vector)
71 		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
72 					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
73 }
74 
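/*
 * dccp_check_seqno  -  Sequence-validity checks (Steps 5 and 6)
 *
 * Validates P.seqno and P.ackno against the current sequence-number windows
 * (see the step comments below), updating GSR/GAR when the packet is
 * acceptable.  Returns 0 on success and -1 when the packet must be dropped;
 * a Step 6 failure additionally answers with a Sync.
 */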
75 static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
76 {
77 	const struct dccp_hdr *dh = dccp_hdr(skb);
78 	struct dccp_sock *dp = dccp_sk(sk);
79 	u64 lswl, lawl;
80 
81 	/*
82 	 *   Step 5: Prepare sequence numbers for Sync
83 	 *     If P.type == Sync or P.type == SyncAck,
84 	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
85 	 *	     / * P is valid, so update sequence number variables
86 	 *		 accordingly.  After this update, P will pass the tests
87 	 *		 in Step 6.  A SyncAck is generated if necessary in
88 	 *		 Step 15 * /
89 	 *	     Update S.GSR, S.SWL, S.SWH
90 	 *	  Otherwise,
91 	 *	     Drop packet and return
92 	 */
93 	if (dh->dccph_type == DCCP_PKT_SYNC ||
94 	    dh->dccph_type == DCCP_PKT_SYNCACK) {
95 		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
96 			      dp->dccps_awl, dp->dccps_awh) &&
97 		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
98 			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
99 		else
100 			return -1;
101 	}
102 
103 	/*
104 	 *   Step 6: Check sequence numbers
105 	 *      Let LSWL = S.SWL and LAWL = S.AWL
106 	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
107 	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
108 	 *      If LSWL <= P.seqno <= S.SWH
109 	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
110 	 *	  Update S.GSR, S.SWL, S.SWH
111 	 *	  If P.type != Sync,
112 	 *	     Update S.GAR
113 	 *      Otherwise,
114 	 *	  Send Sync packet acknowledging P.seqno
115 	 *	  Drop packet and return
116 	 */
117 	lswl = dp->dccps_swl;
118 	lawl = dp->dccps_awl;
119 
120 	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
121 	    dh->dccph_type == DCCP_PKT_CLOSE ||
122 	    dh->dccph_type == DCCP_PKT_RESET) {
123 		lswl = dp->dccps_gsr;
124 		dccp_inc_seqno(&lswl);
125 		lawl = dp->dccps_gar;
126 	}
127 
128 	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
129 	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
130 	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
131 		       lawl, dp->dccps_awh))) {
132 		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
133 
134 		if (dh->dccph_type != DCCP_PKT_SYNC &&
135 		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
136 		     DCCP_PKT_WITHOUT_ACK_SEQ))
137 			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
138 	} else {
139 		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
140 					    "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
141 					    "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu)), "
142 					    "sending SYNC...\n",
143 			       dccp_packet_name(dh->dccph_type),
144 			       lswl, DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swh,
145 			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
146 			        DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
147 			       lawl, DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awh);
148 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
149 		return -1;
150 	}
151 
152 	return 0;
153 }
154 
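/*
 * dccp_rcv_established  -  Receive path for an established connection
 *
 * Runs the sequence checks and option parsing, feeds the packet to both
 * half-connection CCIDs and then dispatches on the packet type: Data and
 * DataAck are queued for the application, Close/CloseReq/Reset tear the
 * connection down, and a Sync is answered with a SyncAck.
 */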
155 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
156 			 const struct dccp_hdr *dh, const unsigned len)
157 {
158 	struct dccp_sock *dp = dccp_sk(sk);
159 
160 	if (dccp_check_seqno(sk, skb))
161 		goto discard;
162 
163 	if (dccp_parse_options(sk, skb))
164 		goto discard;
165 
166 	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
167 		dccp_event_ack_recv(sk, skb);
168 
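	/*
	 * Record the packet in the Ack Vector so that its receipt can be
	 * reported back to the sender.  If the vector is full, an Ack is
	 * scheduled shortly so that the buffered state gets flushed.
	 */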
169 	/*
170 	 * FIXME: check ECN to see if we should use
171 	 * DCCP_ACKPKTS_STATE_ECN_MARKED
172 	 */
173 	if (dp->dccps_options.dccpo_send_ack_vector) {
174 		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
175 
176 		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
177 				     DCCP_SKB_CB(skb)->dccpd_seq,
178 				     DCCP_ACKPKTS_STATE_RECEIVED)) {
179 			LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable "
180 						    "packets buffer full!\n");
181 			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
182 			inet_csk_schedule_ack(sk);
183 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
184 						  TCP_DELACK_MIN,
185 						  DCCP_RTO_MAX);
186 			goto discard;
187 		}
188 
189 		/*
190 		 * FIXME: this activation is probably wrong, have to study more
191 		 * TCP delack machinery and how it fits into DCCP draft, but
192 		 * for now it kinda "works" 8)
193 		 */
194 		if (!inet_csk_ack_scheduled(sk)) {
195 			inet_csk_schedule_ack(sk);
196 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
197 						  DCCP_RTO_MAX);
198 		}
199 	}
200 
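	/*
	 * Hand the packet to both half-connection CCIDs so that the rx and
	 * tx congestion-control state is updated for every packet received.
	 */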
201 	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
202 	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
203 
204 	switch (dccp_hdr(skb)->dccph_type) {
205 	case DCCP_PKT_DATAACK:
206 	case DCCP_PKT_DATA:
207 		/*
208 		 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
209 		 * option if it is.
210 		 */
211 		__skb_pull(skb, dh->dccph_doff * 4);
212 		__skb_queue_tail(&sk->sk_receive_queue, skb);
213 		skb_set_owner_r(skb, sk);
214 		sk->sk_data_ready(sk, 0);
215 		return 0;
216 	case DCCP_PKT_ACK:
217 		goto discard;
218 	case DCCP_PKT_RESET:
219 		/*
220 		 *  Step 9: Process Reset
221 		 *	If P.type == Reset,
222 		 *		Tear down connection
223 		 *		S.state := TIMEWAIT
224 		 *		Set TIMEWAIT timer
225 		 *		Drop packet and return
226 		 */
227 		dccp_fin(sk, skb);
228 		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
229 		return 0;
230 	case DCCP_PKT_CLOSEREQ:
231 		dccp_rcv_closereq(sk, skb);
232 		goto discard;
233 	case DCCP_PKT_CLOSE:
234 		dccp_rcv_close(sk, skb);
235 		return 0;
236 	case DCCP_PKT_REQUEST:
237 		/* Step 7
238 		 *   or (S.is_server and P.type == Response)
239 		 *   or (S.is_client and P.type == Request)
240 		 *   or (S.state >= OPEN and P.type == Request
241 		 *	and P.seqno >= S.OSR)
242 		 *   or (S.state >= OPEN and P.type == Response
243 		 *	and P.seqno >= S.OSR)
244 		 *   or (S.state == RESPOND and P.type == Data),
245 		 *  Send Sync packet acknowledging P.seqno
246 		 *  Drop packet and return
247 		 */
248 		if (dp->dccps_role != DCCP_ROLE_LISTEN)
249 			goto send_sync;
250 		goto check_seq;
251 	case DCCP_PKT_RESPONSE:
252 		if (dp->dccps_role != DCCP_ROLE_CLIENT)
253 			goto send_sync;
254 check_seq:
255 		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
256 send_sync:
257 			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
258 				       DCCP_PKT_SYNC);
259 		}
260 		break;
261 	case DCCP_PKT_SYNC:
262 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
263 			       DCCP_PKT_SYNCACK);
264 		/*
265 		 * From the draft:
266 		 *
267 		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
268 		 * MAY have non-zero-length application data areas, whose
269 		 * contents receivers MUST ignore.
270 		 */
271 		goto discard;
272 	}
273 
274 	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
275 discard:
276 	__kfree_skb(skb);
277 	return 0;
278 }
279 
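/*
 * Process a packet that arrives in the REQUESTING state, i.e. the client has
 * sent a Request and is waiting for the server's Response.  A valid Response
 * stops the Request retransmit timer, sets up the sequence-number state and
 * the CCIDs, moves the socket to PARTOPEN and sends the Ack that completes
 * the three-way handshake.
 *
 * Return values: 1 tells the caller to send a Reset, 0 means the skb was
 * consumed here, -1 asks the caller to free the skb.
 */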
280 static int dccp_rcv_request_sent_state_process(struct sock *sk,
281 					       struct sk_buff *skb,
282 					       const struct dccp_hdr *dh,
283 					       const unsigned len)
284 {
285 	/*
286 	 *  Step 4: Prepare sequence numbers in REQUEST
287 	 *     If S.state == REQUEST,
288 	 *	  If (P.type == Response or P.type == Reset)
289 	 *		and S.AWL <= P.ackno <= S.AWH,
290 	 *	     / * Set sequence number variables corresponding to the
291 	 *		other endpoint, so P will pass the tests in Step 6 * /
292 	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
293 	 *	     / * Response processing continues in Step 10; Reset
294 	 *		processing continues in Step 9 * /
295 	 */
296 	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
297 		const struct inet_connection_sock *icsk = inet_csk(sk);
298 		struct dccp_sock *dp = dccp_sk(sk);
299 
300 		/* Stop the REQUEST timer */
301 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
302 		BUG_TRAP(sk->sk_send_head != NULL);
303 		__kfree_skb(sk->sk_send_head);
304 		sk->sk_send_head = NULL;
305 
306 		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
307 			       dp->dccps_awl, dp->dccps_awh)) {
308 			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
309 				      "P.ackno=%llu, S.AWH=%llu\n",
310 				      (unsigned long long)dp->dccps_awl,
311 			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
312 				      (unsigned long long)dp->dccps_awh);
313 			goto out_invalid_packet;
314 		}
315 
316 		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
317 		dccp_update_gsr(sk, dp->dccps_isr);
318 		/*
319 		 * SWL and AWL are initially adjusted so that they are not less than
320 		 * the initial Sequence Numbers received and sent, respectively:
321 		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
322 		 *	AWL := max(GSS - W' + 1, ISS).
323 		 * These adjustments MUST be applied only at the beginning of the
324 		 * connection.
325 		 *
326 		 * AWL was adjusted in dccp_v4_connect -acme
327 		 */
328 		dccp_set_seqno(&dp->dccps_swl,
329 			       max48(dp->dccps_swl, dp->dccps_isr));
330 
331 		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
332 		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
333 			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
334 			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
335 			/* FIXME: send appropriate RESET code */
336 			goto out_invalid_packet;
337 		}
338 
339 		dccp_sync_mss(sk, dp->dccps_pmtu_cookie);
340 
341 		/*
342 		 *    Step 10: Process REQUEST state (second part)
343 		 *       If S.state == REQUEST,
344 		 *	  / * If we get here, P is a valid Response from the
345 		 *	      server (see Step 4), and we should move to
346 		 *	      PARTOPEN state. PARTOPEN means send an Ack,
347 		 *	      don't send Data packets, retransmit Acks
348 		 *	      periodically, and always include any Init Cookie
349 		 *	      from the Response * /
350 		 *	  S.state := PARTOPEN
351 		 *	  Set PARTOPEN timer
352 		 *	  Continue with S.state == PARTOPEN
353 		 *	  / * Step 12 will send the Ack completing the
354 		 *	      three-way handshake * /
355 		 */
356 		dccp_set_state(sk, DCCP_PARTOPEN);
357 
358 		/* Make sure socket is routed, for correct metrics. */
359 		inet_sk_rebuild_header(sk);
360 
361 		if (!sock_flag(sk, SOCK_DEAD)) {
362 			sk->sk_state_change(sk);
363 			sk_wake_async(sk, 0, POLL_OUT);
364 		}
365 
366 		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
367 		    icsk->icsk_accept_queue.rskq_defer_accept) {
368 			/* Save one ACK. Data will be ready after
369 			 * several ticks, if write_pending is set.
370 			 *
371 			 * It may be deleted, but with this feature tcpdumps
372 			 * look so _wonderfully_ clever, that I was not able
373 			 * to stand against the temptation 8)     --ANK
374 			 */
375 			/*
376 			 * OK, in DCCP we can do a similar trick; it's even in
377 			 * the draft. But there is no need for us to schedule
378 			 * an ack here, as dccp_sendmsg does this for us, as
379 			 * also stated in the draft. -acme
380 			 */
381 			__kfree_skb(skb);
382 			return 0;
383 		}
384 		dccp_send_ack(sk);
385 		return -1;
386 	}
387 
388 out_invalid_packet:
389 	return 1; /* dccp_v4_do_rcv will send a reset, but...
390 		     FIXME: the reset code should be
391 			    DCCP_RESET_CODE_PACKET_ERROR */
392 }
393 
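/*
 * Process a packet received in the RESPOND or PARTOPEN state.  A Reset just
 * clears the delayed-ack timer; an Ack or DataAck completes the handshake:
 * S.OSR is recorded, the PARTOPEN timer stopped and the socket moved to
 * OPEN, with DataAck payloads handed on to dccp_rcv_established().  Returns
 * nonzero if the skb was queued there.
 */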
394 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
395 						   struct sk_buff *skb,
396 						   const struct dccp_hdr *dh,
397 						   const unsigned len)
398 {
399 	int queued = 0;
400 
401 	switch (dh->dccph_type) {
402 	case DCCP_PKT_RESET:
403 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
404 		break;
405 	case DCCP_PKT_DATAACK:
406 	case DCCP_PKT_ACK:
407 		/*
408 		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
409 		 * here, but only if we haven't used the DELACK timer for
410 		 * something else, like sending a delayed ack for a TIMESTAMP
411 		 * echo, etc. For now we're not clearing it; sending an extra
412 		 * ACK when there is nothing else to do in DELACK is not a big
413 		 * deal after all.
414 		 */
415 
416 		/* Stop the PARTOPEN timer */
417 		if (sk->sk_state == DCCP_PARTOPEN)
418 			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
419 
420 		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
421 		dccp_set_state(sk, DCCP_OPEN);
422 
423 		if (dh->dccph_type == DCCP_PKT_DATAACK) {
424 			dccp_rcv_established(sk, skb, dh, len);
425 			queued = 1; /* packet was queued
426 				       (by dccp_rcv_established) */
427 		}
428 		break;
429 	}
430 
431 	return queued;
432 }
433 
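/*
 * dccp_rcv_state_process  -  Per-state receive handling outside OPEN
 *
 * State machine for packets arriving on sockets that are not in the OPEN
 * state: LISTEN, REQUESTING, RESPOND, PARTOPEN and the closing states.
 * Returns 0 when the packet was consumed and nonzero when the caller should
 * answer with a Reset.
 */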
434 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
435 			   struct dccp_hdr *dh, unsigned len)
436 {
437 	struct dccp_sock *dp = dccp_sk(sk);
438 	const int old_state = sk->sk_state;
439 	int queued = 0;
440 
441 	/*
442 	 *  Step 3: Process LISTEN state
443 	 *	(Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
444 	 *
445 	 *     If S.state == LISTEN,
446 	 *	  If P.type == Request or P contains a valid Init Cookie
447 	 *	  	option,
448 	 *	     * Must scan the packet's options to check for an Init
449 	 *		Cookie.  Only the Init Cookie is processed here,
450 	 *		however; other options are processed in Step 8.  This
451 	 *		scan need only be performed if the endpoint uses Init
452 	 *		Cookies *
453 	 *	     * Generate a new socket and switch to that socket *
454 	 *	     Set S := new socket for this port pair
455 	 *	     S.state = RESPOND
456 	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
457 	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
458 	 *	     Continue with S.state == RESPOND
459 	 *	     * A Response packet will be generated in Step 11 *
460 	 *	  Otherwise,
461 	 *	     Generate Reset(No Connection) unless P.type == Reset
462 	 *	     Drop packet and return
463 	 *
464 	 * NOTE: the check for the packet types is done in
465 	 *	 dccp_rcv_state_process
466 	 */
467 	if (sk->sk_state == DCCP_LISTEN) {
468 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
469 			if (dccp_v4_conn_request(sk, skb) < 0)
470 				return 1;
471 
472 			/* FIXME: do congestion control initialization */
473 			goto discard;
474 		}
475 		if (dh->dccph_type == DCCP_PKT_RESET)
476 			goto discard;
477 
478 		/* Caller (dccp_v4_do_rcv) will send Reset(No Connection) */
479 		return 1;
480 	}
481 
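	/*
	 * For all remaining states except REQUESTING, run the generic checks
	 * first: sequence validation (Step 6), option parsing and ack
	 * bookkeeping (Step 8), CCID delivery and Ack Vector recording.
	 * REQUESTING is skipped because its sequence-number variables are
	 * only set up once a valid Response has been processed (Step 4,
	 * further below).
	 */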
482 	if (sk->sk_state != DCCP_REQUESTING) {
483 		if (dccp_check_seqno(sk, skb))
484 			goto discard;
485 
486 		/*
487 		 * Step 8: Process options and mark acknowledgeable
488 		 */
489 		if (dccp_parse_options(sk, skb))
490 			goto discard;
491 
492 		if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
493 		    DCCP_PKT_WITHOUT_ACK_SEQ)
494 			dccp_event_ack_recv(sk, skb);
495 
496 		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
497 		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
498 
499 		/*
500 		 * FIXME: check ECN to see if we should use
501 		 * DCCP_ACKPKTS_STATE_ECN_MARKED
502 		 */
503 		if (dp->dccps_options.dccpo_send_ack_vector) {
504 			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
505 					     DCCP_SKB_CB(skb)->dccpd_seq,
506 					     DCCP_ACKPKTS_STATE_RECEIVED))
507 				goto discard;
508 			/*
509 			 * FIXME: this activation is probably wrong, have to
510 			 * study more TCP delack machinery and how it fits into
511 			 * DCCP draft, but for now it kinda "works" 8)
512 			 */
513 			if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
514 			     DCCP_MAX_SEQNO + 1) &&
515 			    !inet_csk_ack_scheduled(sk)) {
516 				inet_csk_schedule_ack(sk);
517 				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
518 							  TCP_DELACK_MIN,
519 							  DCCP_RTO_MAX);
520 			}
521 		}
522 	}
523 
524 	/*
525 	 *  Step 9: Process Reset
526 	 *	If P.type == Reset,
527 	 *		Tear down connection
528 	 *		S.state := TIMEWAIT
529 	 *		Set TIMEWAIT timer
530 	 *		Drop packet and return
531 	 */
532 	if (dh->dccph_type == DCCP_PKT_RESET) {
533 		/*
534 		 * Queue the equivalent of TCP fin so that dccp_recvmsg
535 		 * exits the loop
536 		 */
537 		dccp_fin(sk, skb);
538 		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
539 		return 0;
540 		/*
541 		 *   Step 7: Check for unexpected packet types
542 		 *      If (S.is_server and P.type == CloseReq)
543 		 *	    or (S.is_server and P.type == Response)
544 		 *	    or (S.is_client and P.type == Request)
545 		 *	    or (S.state == RESPOND and P.type == Data),
546 		 *	  Send Sync packet acknowledging P.seqno
547 		 *	  Drop packet and return
548 		 */
549 	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
550 		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
551 		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
552 		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
553 		     dh->dccph_type == DCCP_PKT_REQUEST) ||
554 		    (sk->sk_state == DCCP_RESPOND &&
555 		     dh->dccph_type == DCCP_PKT_DATA)) {
556 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
557 			       DCCP_PKT_SYNC);
558 		goto discard;
559 	}
560 
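	/*
	 * Per-state dispatch: CLOSED tells the caller to reset, REQUESTING
	 * runs the Request-sent handler (Steps 4 and 10), and RESPOND or
	 * PARTOPEN complete the handshake on the first Ack or DataAck.
	 */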
561 	switch (sk->sk_state) {
562 	case DCCP_CLOSED:
563 		return 1;
564 
565 	case DCCP_REQUESTING:
566 		/* FIXME: do congestion control initialization */
567 
568 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
569 		if (queued >= 0)
570 			return queued;
571 
572 		__kfree_skb(skb);
573 		return 0;
574 
575 	case DCCP_RESPOND:
576 	case DCCP_PARTOPEN:
577 		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
578 								 dh, len);
579 		break;
580 	}
581 
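	/*
	 * An Ack or DataAck that moved us out of PARTOPEN finishes the
	 * handshake on this side: wake up anyone sleeping on a state change
	 * and report the socket as writable.
	 */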
582 	if (dh->dccph_type == DCCP_PKT_ACK ||
583 	    dh->dccph_type == DCCP_PKT_DATAACK) {
584 		switch (old_state) {
585 		case DCCP_PARTOPEN:
586 			sk->sk_state_change(sk);
587 			sk_wake_async(sk, 0, POLL_OUT);
588 			break;
589 		}
590 	}
591 
592 	if (!queued) {
593 discard:
594 		__kfree_skb(skb);
595 	}
596 	return 0;
597 }
598