/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

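/*
 * Queue the packet as the DCCP equivalent of a TCP FIN: mark the socket as
 * shut down for reading, strip the DCCP header and put what is left on the
 * receive queue, so that a blocking dccp_recvmsg() wakes up and sees
 * end-of-stream.
 */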
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}

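/*
 * Handle a received Close: in PARTOPEN or OPEN, answer with a Reset
 * (code Closed), queue the packet like a FIN via dccp_fin() and move the
 * socket to CLOSED.  Other states ignore the packet.
 */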
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	switch (sk->sk_state) {
	case DCCP_PARTOPEN:
	case DCCP_OPEN:
		dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	}
}

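/*
 * Handle a received CloseReq.  Only the client may act on it: a server
 * receiving a CloseReq treats it as an unexpected packet type (Step 7)
 * and answers with a Sync.  A client in PARTOPEN or OPEN moves to CLOSING
 * and sends a Close of its own.
 */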
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		return;
	}

	switch (sk->sk_state) {
	case DCCP_PARTOPEN:
	case DCCP_OPEN:
		dccp_set_state(sk, DCCP_CLOSING);
		dccp_send_close(sk);
		break;
	}
}

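/*
 * A packet carrying an Acknowledgement Number has arrived; if Ack Vectors
 * are in use, pass the acked sequence number to the ack vector code so it
 * can drop state the peer has now confirmed receiving.
 */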
static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dp->dccps_options.dccpo_send_ack_vector)
		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

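/*
 * Validate the sequence and acknowledgement numbers of an incoming packet
 * against the current sequence windows (Steps 5 and 6 of the packet
 * processing pseudocode).  Returns 0 if the packet is acceptable, after
 * updating the greatest-sequence-received state, and -1 if it must be
 * dropped (a Sync is sent when the Step 6 checks fail).
 */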
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl = dp->dccps_swl;
	u64 lawl = dp->dccps_awl;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awl, dp->dccps_awh) &&
		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		else
			return -1;
	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 *      Otherwise,
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
		   dh->dccph_type == DCCP_PKT_CLOSE ||
		   dh->dccph_type == DCCP_PKT_RESET) {
		lswl = dp->dccps_gsr;
		dccp_inc_seqno(&lswl);
		lawl = dp->dccps_gar;
	}

	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	} else {
		dccp_pr_debug("Step 6 failed, sending SYNC...\n");
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		return -1;
	}

	return 0;
}

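/*
 * Main receive path for a connection that has completed the handshake.
 * Checks sequence numbers and options, feeds the ack vector and both CCIDs,
 * and then dispatches on the packet type: data is queued for the
 * application, while Reset, Close and CloseReq drive connection teardown.
 */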
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	/*
	 * FIXME: check ECN to see if we should use
	 * DCCP_ACKPKTS_STATE_ECN_MARKED
	 */
	if (dp->dccps_options.dccpo_send_ack_vector) {
		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;

		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
				     DCCP_SKB_CB(skb)->dccpd_seq,
				     DCCP_ACKPKTS_STATE_RECEIVED)) {
			LIMIT_NETDEBUG(KERN_INFO "DCCP: acknowledgeable packets buffer full!\n");
			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MIN, TCP_RTO_MAX);
			goto discard;
		}

		/*
		 * FIXME: this activation is probably wrong; we need to study
		 * the TCP delack machinery more closely and see how it fits
		 * into the DCCP draft, but for now it kinda "works" 8)
		 */
		if (!inet_csk_ack_scheduled(sk)) {
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ, TCP_RTO_MAX);
		}
	}

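	/*
	 * Let both half-connection CCIDs see the packet so that the receive-
	 * and transmit-side congestion control state can be updated.
	 */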
	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: check if sk_receive_queue is full, schedule
		 * DATA_DROPPED option if it is.
		 */
		__skb_pull(skb, dh->dccph_doff * 4);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk, 0);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		dccp_rcv_closereq(sk, skb);
		goto discard;
	case DCCP_PKT_CLOSE:
		dccp_rcv_close(sk, skb);
		return 0;
	case DCCP_PKT_REQUEST:
		/*
		 *   Step 7: Check for unexpected packet types
		 *	or (S.is_server and P.type == Response)
		 *	or (S.is_client and P.type == Request)
		 *	or (S.state >= OPEN and P.type == Request
		 *	    and P.seqno >= S.OSR)
		 *	or (S.state >= OPEN and P.type == Response
		 *	    and P.seqno >= S.OSR)
		 *	or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		}
		break;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}

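/*
 * Handle a packet that arrives while our Request is outstanding
 * (REQUESTING state).  A Response with a valid acknowledgement number
 * completes Step 4: the Request retransmit timer is stopped, the peer's
 * initial sequence number is recorded and the socket moves to PARTOPEN
 * (Step 10), from where the Ack completing the three-way handshake is
 * sent.  Returning 1 makes the caller send a Reset for an invalid packet.
 */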
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		BUG_TRAP(sk->sk_send_head != NULL);
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long) dp->dccps_awl,
				      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long) dp->dccps_awh);
			goto out_invalid_packet;
		}

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
			/* FIXME: send appropriate RESET code */
			goto out_invalid_packet;
		}

		dccp_sync_mss(sk, dp->dccps_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the server (see
		 *	     Step 4), and we should move to PARTOPEN state.  PARTOPEN
		 *	     means send an Ack, don't send Data packets, retransmit
		 *	     Acks periodically, and always include any Init Cookie from
		 *	     the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the three-way
		 *	     handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		inet_sk_rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK.  Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can do a similar trick as well; it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, as also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	return 1; /* dccp_v4_do_rcv will send a reset, but...
		     FIXME: the reset code should be
		     DCCP_RESET_CODE_PACKET_ERROR */
}

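/*
 * Handle packets received in RESPOND or PARTOPEN.  A Reset stops the
 * delayed-ack timer; an Ack or DataAck finishes the handshake on this side,
 * so the packet's sequence number is recorded as S.OSR, the socket moves to
 * OPEN and any piggybacked data is fed to dccp_rcv_established().
 */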
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc.  For now we're not clearing it, since sending an
		 * extra ACK when there is nothing else to do in DELACK is
		 * not a big deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK) {
			dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued (by dccp_rcv_established) */
		}
		break;
	}

	return queued;
}

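/*
 * State machine for packets that arrive outside the established fast path:
 * validates sequence numbers and options, handles Reset and the unexpected
 * packet types of Step 7, and then dispatches on the socket state (LISTEN,
 * REQUESTING, RESPOND, PARTOPEN).  A non-zero return tells the caller to
 * generate a Reset.
 */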
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;
	int queued = 0;

	if (sk->sk_state != DCCP_LISTEN && sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, skb))
			goto discard;

		if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

		/*
		 * FIXME: check ECN to see if we should use
		 * DCCP_ACKPKTS_STATE_ECN_MARKED
		 */
		if (dp->dccps_options.dccpo_send_ack_vector) {
			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
					     DCCP_SKB_CB(skb)->dccpd_seq,
					     DCCP_ACKPKTS_STATE_RECEIVED))
				goto discard;
			/*
			 * FIXME: this activation is probably wrong; we need to
			 * study the TCP delack machinery more closely and see
			 * how it fits into the DCCP draft, but for now it
			 * kinda "works" 8)
			 */
			if (dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1 &&
			    !inet_csk_ack_scheduled(sk)) {
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MIN, TCP_RTO_MAX);
			}
		}
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		/* Queue the equivalent of a TCP FIN so that dccp_recvmsg()
		 * exits its loop */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == CloseReq)
		 *	    or (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    (dh->dccph_type == DCCP_PKT_RESPONSE || dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
		     dh->dccph_type == DCCP_PKT_REQUEST) ||
		    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		goto discard;
	}

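	/*
	 * Per-state dispatch: LISTEN turns a valid Request into a new
	 * connection request, REQUESTING waits for the server's Response,
	 * and RESPOND/PARTOPEN wait for the Ack that completes the
	 * three-way handshake.
	 */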
	switch (sk->sk_state) {
	case DCCP_CLOSED:
		return 1;

	case DCCP_LISTEN:
		if (dh->dccph_type == DCCP_PKT_ACK ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			return 1;

		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (dccp_v4_conn_request(sk, skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		goto discard;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb, dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK || dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
			break;
		}
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}
513