xref: /openbmc/linux/net/rxrpc/input.c (revision 72d25643)
1 /* RxRPC packet reception
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/net.h>
16 #include <linux/skbuff.h>
17 #include <linux/errqueue.h>
18 #include <linux/udp.h>
19 #include <linux/in.h>
20 #include <linux/in6.h>
21 #include <linux/icmp.h>
22 #include <linux/gfp.h>
23 #include <net/sock.h>
24 #include <net/af_rxrpc.h>
25 #include <net/ip.h>
26 #include <net/udp.h>
27 #include <net/net_namespace.h>
28 #include "ar-internal.h"
29 
30 /*
31  * queue a packet for recvmsg to pass to userspace
32  * - the caller must hold a lock on call->lock
33  * - must not be called with interrupts disabled (sk_filter() disables BH's)
34  * - eats the packet whether successful or not
35  * - there must be just one reference to the packet, which the caller passes to
36  *   this function
37  */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

	if (!force) {
		/* cast skb->rcvbuf to unsigned...  It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
//		ret = -ENOBUFS;
//		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
//		    (unsigned int) sk->sk_rcvbuf)
//			goto out;

		/* give any attached socket filter a chance to veto the packet;
		 * a negative return discards it via the out label */
		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	/* recheck terminal/released/closing state under the receive-queue lock
	 * so we can't queue behind a terminal message posted concurrently */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		/* charge the packet's memory to the socket's receive budget */
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {
			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk);
		}
		/* ownership has passed to the queue/interceptor; the free
		 * below must not run (rxrpc_free_skb() tolerates NULL) */
		skb = NULL;
	} else {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}
	ret = 0;

out:
	rxrpc_free_skb(skb);

	_leave(" = %d", ret);
	return ret;
}
116 
117 /*
118  * process a DATA packet, posting the packet to the appropriate queue
119  * - eats the packet if successful
120  */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
				   struct sk_buff *skb, u32 seq)
{
	struct rxrpc_skb_priv *sp;
	bool terminal;
	int ret, ackbit, ack;

	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, NULL);

	spin_lock(&call->lock);

	/* a call past COMPLETE no longer accepts data */
	if (call->state > RXRPC_CALL_COMPLETE)
		goto discard;

	/* invariant: expect >= post >= recv >= eaten */
	ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
	ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
	ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

	/* packets below the posting point have already been delivered */
	if (seq < call->rx_data_post) {
		_debug("dup #%u [-%u]", seq, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		ret = -ENOBUFS;
		goto discard_and_ack;
	}

	/* we may already have the packet in the out of sequence queue */
	ackbit = seq - (call->rx_data_eaten + 1);
	ASSERTCMP(ackbit, >=, 0);
	if (__test_and_set_bit(ackbit, call->ackr_window)) {
		_debug("dup oos #%u [%u,%u]",
		       seq, call->rx_data_eaten, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	/* reject packets beyond the advertised receive window; undo the
	 * window bit we just set as the packet won't be kept */
	if (seq >= call->ackr_win_top) {
		_debug("exceed #%u [%u]", seq, call->ackr_win_top);
		__clear_bit(ackbit, call->ackr_window);
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		goto discard_and_ack;
	}

	if (seq == call->rx_data_expect) {
		/* in-order arrival: advance the expectation point */
		clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
		call->rx_data_expect++;
	} else if (seq > call->rx_data_expect) {
		/* a gap opened up; only ACK out-of-sequence once per gap */
		_debug("oos #%u [%u]", seq, call->rx_data_expect);
		call->rx_data_expect = seq + 1;
		if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			goto enqueue_and_ack;
		}
		goto enqueue_packet;
	}

	/* ahead of the posting point but not a gap: defer to the OOS queue */
	if (seq != call->rx_data_post) {
		_debug("ahead #%u [%u]", seq, call->rx_data_post);
		goto enqueue_packet;
	}

	/* data arriving after the last packet was seen is a protocol error */
	if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
		goto protocol_error;

	/* if the packet need security things doing to it, then it goes down
	 * the slow path */
	if (call->conn->security_ix)
		goto enqueue_packet;

	/* fast path: attach the packet to the call and post it directly to
	 * the socket receive queue */
	sp->call = call;
	rxrpc_get_call(call);
	atomic_inc(&call->skb_count);
	terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
		    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOBUFS) {
			/* no room: pretend we never saw it so it can be
			 * retransmitted */
			__clear_bit(ackbit, call->ackr_window);
			ack = RXRPC_ACK_NOSPACE;
			goto discard_and_ack;
		}
		goto out;
	}

	/* rxrpc_queue_rcv_skb() consumed the packet */
	skb = NULL;

	_debug("post #%u", seq);
	ASSERTCMP(call->rx_data_post, ==, seq);
	call->rx_data_post++;

	if (sp->hdr.flags & RXRPC_LAST_PACKET)
		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

	/* if we've reached an out of sequence packet then we need to drain
	 * that queue into the socket Rx queue now */
	if (call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		read_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
			rxrpc_queue_call(call);
		read_unlock(&call->state_lock);
	}

	spin_unlock(&call->lock);
	atomic_inc(&call->ackr_not_idle);
	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
	_leave(" = 0 [posted]");
	return 0;

protocol_error:
	ret = -EBADMSG;
out:
	spin_unlock(&call->lock);
	_leave(" = %d", ret);
	return ret;

discard_and_ack:
	_debug("discard and ACK packet %p", skb);
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
discard:
	spin_unlock(&call->lock);
	rxrpc_free_skb(skb);
	_leave(" = 0 [discarded]");
	return 0;

enqueue_and_ack:
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
enqueue_packet:
	/* defer the packet to the call's Rx queue for process-context
	 * handling (out-of-order or security processing needed) */
	_net("defer skb %p", skb);
	spin_unlock(&call->lock);
	skb_queue_tail(&call->rx_queue, skb);
	atomic_inc(&call->ackr_not_idle);
	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD)
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
	_leave(" = 0 [queued]");
	return 0;
}
263 
264 /*
265  * assume an implicit ACKALL of the transmission phase of a client socket upon
266  * reception of the first reply packet
267  */
268 static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
269 {
270 	write_lock_bh(&call->state_lock);
271 
272 	switch (call->state) {
273 	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
274 		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
275 		call->acks_latest = serial;
276 
277 		_debug("implicit ACKALL %%%u", call->acks_latest);
278 		set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
279 		write_unlock_bh(&call->state_lock);
280 
281 		if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
282 			clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
283 			clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
284 			clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
285 		}
286 		break;
287 
288 	default:
289 		write_unlock_bh(&call->state_lock);
290 		break;
291 	}
292 }
293 
294 /*
295  * post an incoming packet to the nominated call to deal with
296  * - must get rid of the sk_buff, either by freeing it or by queuing it
297  */
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 hi_serial, abort_code;

	_enter("%p,%p", call, skb);

	ASSERT(!irqs_disabled());

#if 0 // INJECT RX ERROR
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		static int skip = 0;
		if (++skip == 3) {
			printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
			skip = 0;
			goto free_packet;
		}
	}
#endif

	/* track the latest serial number on this connection for ACK packet
	 * information */
	hi_serial = atomic_read(&call->conn->hi_serial);
	while (sp->hdr.serial > hi_serial)
		hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
					   sp->hdr.serial);

	/* request ACK generation for any ACK or DATA packet that requests
	 * it */
	if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		_proto("ACK Requested on %%%u", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_debug("abort");

		/* the abort code is carried in the first 4 bytes of payload */
		if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
			goto protocol_error;

		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

		write_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_REMOTELY_ABORTED;
			call->remote_abort = abort_code;
			set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		goto free_packet_unlock;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* BUSY only makes sense in reply to a client call */
		if (rxrpc_conn_is_service(call->conn))
			goto protocol_error;

		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_SERVER_BUSY;
			set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
			rxrpc_queue_call(call);
			/* fall through - a repeated BUSY is just discarded */
		case RXRPC_CALL_SERVER_BUSY:
			goto free_packet_unlock;
		default:
			goto protocol_error_locked;
		}

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
		goto protocol_error;

	case RXRPC_PACKET_TYPE_DATA:
		_proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		/* DATA sequence numbers start at 1; 0 is invalid */
		if (sp->hdr.seq == 0)
			goto protocol_error;

		call->ackr_prev_seq = sp->hdr.seq;

		/* received data implicitly ACKs all of the request packets we
		 * sent when we're acting as a client */
		if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
			rxrpc_assume_implicit_ackall(call, sp->hdr.serial);

		switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
		case 0:
			/* the data processor consumed the packet */
			skb = NULL;
			goto done;

		default:
			/* only 0 and -EBADMSG are expected back */
			BUG();

			/* data packet received beyond the last packet */
		case -EBADMSG:
			goto protocol_error;
		}

	case RXRPC_PACKET_TYPE_ACKALL:
	case RXRPC_PACKET_TYPE_ACK:
		/* ACK processing is done in process context */
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD) {
			skb_queue_tail(&call->rx_queue, skb);
			rxrpc_queue_call(call);
			skb = NULL;
		}
		read_unlock_bh(&call->state_lock);
		goto free_packet;
	}

protocol_error:
	_debug("protocol error");
	write_lock_bh(&call->state_lock);
protocol_error_locked:
	/* locally abort the call over a protocol violation */
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
free_packet_unlock:
	write_unlock_bh(&call->state_lock);
free_packet:
	rxrpc_free_skb(skb);
done:
	_leave("");
}
430 
431 /*
432  * split up a jumbo data packet
433  */
static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
				       struct sk_buff *jumbo)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *part;

	_enter(",{%u,%u}", jumbo->data_len, jumbo->len);

	sp = rxrpc_skb(jumbo);

	do {
		/* the subpacket we peel off must not look like a jumbo itself;
		 * the clone below copies the cb area, so clear before cloning */
		sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;

		/* make a clone to represent the first subpacket in what's left
		 * of the jumbo packet */
		part = skb_clone(jumbo, GFP_ATOMIC);
		if (!part) {
			/* simply ditch the tail in the event of ENOMEM */
			pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
			break;
		}
		rxrpc_new_skb(part);

		/* limit the clone to the first subpacket's payload */
		pskb_trim(part, RXRPC_JUMBO_DATALEN);

		/* advance the remainder past that payload and the embedded
		 * jumbo header that follows it */
		if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
			goto protocol_error;

		if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;
		if (!pskb_pull(jumbo, sizeof(jhdr)))
			BUG();

		/* each subpacket consumes consecutive seq and serial numbers */
		sp->hdr.seq	+= 1;
		sp->hdr.serial	+= 1;
		sp->hdr.flags	= jhdr.flags;
		sp->hdr._rsvd	= ntohs(jhdr._rsvd);

		_proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);

		/* hand off the subpacket; ownership passes to the processor */
		rxrpc_fast_process_packet(call, part);
		part = NULL;

	} while (sp->hdr.flags & RXRPC_JUMBO_PACKET);

	/* process whatever remains as the final (non-jumbo) packet */
	rxrpc_fast_process_packet(call, jumbo);
	_leave("");
	return;

protocol_error:
	_debug("protocol error");
	rxrpc_free_skb(part);
	rxrpc_free_skb(jumbo);
	/* locally abort the call over the malformed jumbo packet */
	write_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock_bh(&call->state_lock);
	_leave("");
}
498 
499 /*
500  * post an incoming packet to the appropriate call/socket to deal with
501  * - must get rid of the sk_buff, either by freeing it or by queuing it
502  */
static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
				      struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;

	_enter("%p,%p", call, skb);

	sp = rxrpc_skb(skb);

	_debug("extant call [%d]", call->state);

	read_lock(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_LOCALLY_ABORTED:
		/* make sure the pending abort gets processed, then drop
		 * the packet */
		if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
			rxrpc_queue_call(call);
			goto free_unlock;
		}
		/* fall through - abort already queued, treat as dead */
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_NETWORK_ERROR:
	case RXRPC_CALL_DEAD:
		goto dead_call;
	case RXRPC_CALL_COMPLETE:
	case RXRPC_CALL_CLIENT_FINAL_ACK:
		/* complete server call */
		if (rxrpc_conn_is_service(call->conn))
			goto dead_call;
		/* resend last packet of a completed call */
		_debug("final ack again");
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		rxrpc_queue_call(call);
		goto free_unlock;
	default:
		break;
	}

	read_unlock(&call->state_lock);
	/* pin the call across packet processing */
	rxrpc_get_call(call);

	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_process_jumbo_packet(call, skb);
	else
		rxrpc_fast_process_packet(call, skb);

	rxrpc_put_call(call);
	goto done;

dead_call:
	/* bounce anything but an ABORT back at the sender */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		skb->priority = RX_CALL_DEAD;
		rxrpc_reject_packet(call->conn->params.local, skb);
		goto unlock;
	}
free_unlock:
	rxrpc_free_skb(skb);
unlock:
	read_unlock(&call->state_lock);
done:
	_leave("");
}
565 
566 /*
567  * post connection-level events to the connection
568  * - this includes challenges, responses and some aborts
569  */
570 static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
571 				      struct sk_buff *skb)
572 {
573 	_enter("%p,%p", conn, skb);
574 
575 	skb_queue_tail(&conn->rx_queue, skb);
576 	return rxrpc_queue_conn(conn);
577 }
578 
579 /*
580  * post endpoint-level events to the local endpoint
581  * - this includes debug and version messages
582  */
583 static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
584 				       struct sk_buff *skb)
585 {
586 	_enter("%p,%p", local, skb);
587 
588 	skb_queue_tail(&local->event_queue, skb);
589 	rxrpc_queue_local(local);
590 }
591 
592 /*
593  * Extract the wire header from a packet and translate the byte order.
594  */
595 static noinline
596 int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
597 {
598 	struct rxrpc_wire_header whdr;
599 
600 	/* dig out the RxRPC connection details */
601 	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
602 		return -EBADMSG;
603 	if (!pskb_pull(skb, sizeof(whdr)))
604 		BUG();
605 
606 	memset(sp, 0, sizeof(*sp));
607 	sp->hdr.epoch		= ntohl(whdr.epoch);
608 	sp->hdr.cid		= ntohl(whdr.cid);
609 	sp->hdr.callNumber	= ntohl(whdr.callNumber);
610 	sp->hdr.seq		= ntohl(whdr.seq);
611 	sp->hdr.serial		= ntohl(whdr.serial);
612 	sp->hdr.flags		= whdr.flags;
613 	sp->hdr.type		= whdr.type;
614 	sp->hdr.userStatus	= whdr.userStatus;
615 	sp->hdr.securityIndex	= whdr.securityIndex;
616 	sp->hdr._rsvd		= ntohs(whdr._rsvd);
617 	sp->hdr.serviceId	= ntohs(whdr.serviceId);
618 	return 0;
619 }
620 
621 /*
622  * handle data received on the local endpoint
623  * - may be called in interrupt context
624  *
625  * The socket is locked by the caller and this prevents the socket from being
626  * shut down and the local endpoint from going away, thus sk_user_data will not
627  * be cleared until this function returns.
628  */
void rxrpc_data_ready(struct sock *sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = sk->sk_user_data;
	struct sk_buff *skb;
	int ret;

	_enter("%p", sk);

	ASSERT(!irqs_disabled());

	/* non-blocking receive; -EAGAIN just means nothing was queued */
	skb = skb_recv_datagram(sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own data there.
	 */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);

	_net("Rx UDP packet from %08x:%04hu",
	     ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

	/* reject packet types we don't support */
	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	/* VERSION packets are endpoint-level, not tied to any connection */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
		rxrpc_post_packet_to_local(local, skb);
		goto out;
	}

	/* DATA packets must carry both a call number and a sequence number */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
		goto bad_message;

	rcu_read_lock();

retry_find_conn:
	conn = rxrpc_find_connection_rcu(local, skb);
	if (!conn)
		goto cant_route_call;

	if (sp->hdr.callNumber == 0) {
		/* Connection-level packet */
		_debug("CONN %p {%d}", conn, conn->debug_id);
		/* a false return means the conn work item couldn't be queued;
		 * retry the lookup and posting */
		if (!rxrpc_post_packet_to_conn(conn, skb))
			goto retry_find_conn;
	} else {
		/* Call-bound packets are routed by connection channel. */
		unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		struct rxrpc_channel *chan = &conn->channels[channel];
		struct rxrpc_call *call = rcu_dereference(chan->call);

		if (!call || atomic_read(&call->usage) == 0)
			goto cant_route_call;

		rxrpc_post_packet_to_call(call, skb);
	}

	rcu_read_unlock();
out:
	return;

cant_route_call:
	rcu_read_unlock();

	_debug("can't route call");
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
	    sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		if (sp->hdr.seq == 1) {
			/* first DATA packet of an unknown client call: hand
			 * it to the accept path to start a new incoming call */
			_debug("first packet");
			skb_queue_tail(&local->accept_queue, skb);
			rxrpc_queue_work(&local->processor);
			_leave(" [incoming]");
			return;
		}
		skb->priority = RX_INVALID_OPERATION;
	} else {
		skb->priority = RX_CALL_DEAD;
	}

	/* never reply to an ABORT, to avoid abort ping-pong */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		_debug("reject type %d",sp->hdr.type);
		rxrpc_reject_packet(local, skb);
	}
	_leave(" [no call]");
	return;

bad_message:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
}
753