/* xref: /openbmc/linux/net/rxrpc/input.c (revision a17922de) */
/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}

/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
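
/* A minimal, illustrative sketch (not part of the build) of the window
 * update rules implemented above, with the window counted in packets as
 * this code does: slow start grows the window by one packet per ACK event
 * that carries new ACKs, congestion avoidance by one packet per filled RTT
 * window.  The helper name and parameters are hypothetical and exist
 * purely for illustration.
 */
#if 0
static unsigned int rxrpc_cong_update_sketch(unsigned int cwnd,
					     unsigned int ssthresh,
					     unsigned int cumulative_acks,
					     bool rtt_window_ended)
{
	if (cwnd < ssthresh) {
		/* Slow start: one increment per ACK event with new ACKs. */
		if (cumulative_acks > 0)
			cwnd += 1;
	} else if (rtt_window_ended && cumulative_acks >= cwnd) {
		/* Congestion avoidance: linear growth, one per RTT. */
		cwnd += 1;
	}
	/* Never exceed the Rx/Tx ring capacity. */
	return min_t(unsigned int, cwnd, RXRPC_RXTX_BUFF_SIZE - 1);
}
#endif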

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.  A ping is only proposed if we have fewer than
 * three RTT samples or the last ping request was sent over a second ago.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
			    int skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	int ix;
	u8 annotation;

	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST)
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	wake_up(&call->waitq);

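	/* The rotated packets were unhooked from the ring under the call
	 * lock, but are freed here, outside of it, so that skb destruction
	 * doesn't lengthen the locked section.
	 */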
	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
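
/* For reference, the Tx-phase end transitions effected above:
 *
 *	CLIENT_SEND_REQUEST/CLIENT_AWAIT_REPLY -> CLIENT_RECV_REPLY
 *		(if the reply has begun)
 *	CLIENT_SEND_REQUEST/CLIENT_AWAIT_REPLY -> CLIENT_AWAIT_REPLY
 *		(if it hasn't)
 *	SERVER_AWAIT_ACK -> call completed
 *
 * Any other starting state is a protocol error and aborts the call.
 */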

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_rotate_tx_window(call, top, &summary);
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_proto_abort("TXL", call, top);
		return false;
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}

/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * small headers between them that indicate how to change the initial header
 * for each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len;
	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;

	do {
		nr_jumbo++;
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		offset += RXRPC_JUMBO_DATALEN;
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
	} while (flags & RXRPC_JUMBO_PACKET);

	sp->nr_jumbo = nr_jumbo;
	return true;

protocol_error:
	return false;
}
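
/* A hedged sketch (not part of the build) of the layout the validator
 * above walks: subpacket n's data begins at a fixed stride from the wire
 * header, since every subpacket bar the last is exactly
 * RXRPC_JUMBO_DATALEN long and is followed by a jumbo header carrying the
 * flags for the next subpacket.  The helper name is hypothetical.
 */
#if 0
static size_t rxrpc_jumbo_data_offset_sketch(unsigned int n)
{
	return sizeof(struct rxrpc_wire_header) +
		n * (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header));
}
#endif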

/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
				 u8 annotation, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
	if (annotation == 0)
		return;

	/* Skip jumbo subpackets that are duplicates.  When we've had three or
	 * more partially duplicate jumbo packets, we refuse to take any more
	 * jumbos for this call.
	 */
	if (!*_jumbo_bad) {
		call->nr_jumbo_bad++;
		*_jumbo_bad = true;
	}
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	enum rxrpc_call_state state;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	state = READ_ONCE(call->state);
	if (state >= RXRPC_CALL_COMPLETE)
		return;
	if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
		unsigned long timo = READ_ONCE(call->next_req_timo);
		unsigned long now, expect_req_by;

		if (timo) {
			now = jiffies;
			expect_req_by = now + timo;
			WRITE_ONCE(call->expect_req_by, expect_req_by);
			rxrpc_reduce_call_timer(call, expect_req_by, now,
						rxrpc_timer_set_for_idle);
		}
	}

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		return;

	call->ackr_prev_seq = seq;

	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		annotation = 1;
	}

next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top)
			return rxrpc_proto_abort("LSN", call, seq);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top))
			return rxrpc_proto_abort("LSA", call, seq);
	}

	trace_rxrpc_rx_data(call, seq, serial, flags, annotation);
	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}
	/* Queue the packet.  We use a couple of memory barriers here as we
	 * need to make sure that rx_top is perceived to be set after the
	 * buffer pointer and that the buffer pointer is set after the
	 * annotation and the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			return rxrpc_proto_abort("XJF", call, seq);
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		if (flags & RXRPC_JUMBO_PACKET)
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);

	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
		rxrpc_notify_socket(call);
	_leave(" [queued]");
}
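
/* A hedged sketch (not compiled) of the consumer-side ordering that the
 * barriers in rxrpc_input_data() pair with: rx_top must be read with
 * acquire semantics before the buffer slot, and the slot before the
 * annotation, mirroring the producer's annotation -> smp_wmb() -> buffer
 * -> smp_store_release(rx_top) sequence.
 */
#if 0
	rxrpc_seq_t top = smp_load_acquire(&call->rx_top);
	struct sk_buff *skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
	u8 annotation;

	smp_rmb();	/* Read the buffer pointer before the annotation. */
	annotation = call->rxtx_annotations[seq & RXRPC_RXTX_BUFF_MASK];
#endif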

/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	ktime_t sent_at;
	int ix;

	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		sp = rxrpc_skb(skb);
		if (sp->hdr.serial != orig_serial)
			continue;
		smp_rmb();
		sent_at = skb->tstamp;
		goto found;
	}
	return;

found:
	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
			   orig_serial, ack_serial, sent_at, resp_time);
}

/*
 * Process the response to a ping that we sent to find out if we lost an ACK.
 *
 * If we got back a ping response that indicates a lower tx_top than what we
 * had at the time of the ping transmission, we adjudge all the DATA packets
 * sent between the response tx_top and the ping-time tx_top to have been lost.
 */
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
	rxrpc_seq_t top, bottom, seq;
	bool resend = false;

	spin_lock_bh(&call->lock);

	bottom = call->tx_hard_ack + 1;
	top = call->acks_lost_top;
	if (before(bottom, top)) {
		for (seq = bottom; before_eq(seq, top); seq++) {
			int ix = seq & RXRPC_RXTX_BUFF_MASK;
			u8 annotation = call->rxtx_annotations[ix];
			u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;

			if (anno_type != RXRPC_TX_ANNO_UNACK)
				continue;
			annotation &= ~RXRPC_TX_ANNO_MASK;
			annotation |= RXRPC_TX_ANNO_RETRANS;
			call->rxtx_annotations[ix] = annotation;
			resend = true;
		}
	}

	spin_unlock_bh(&call->lock);

	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

	ping_time = call->ping_time;
	smp_rmb();
	ping_serial = call->ping_serial;

	if (orig_serial == call->acks_lost_ping)
		rxrpc_input_check_for_lost_ack(call);

	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
	    before(orig_serial, ping_serial))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}
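
/* The smp_rmb() in rxrpc_input_ping_response() pairs with a write barrier
 * on the ping transmit path.  A hedged sketch (not compiled) of the
 * ordering the sender is assumed to follow, publishing the serial before
 * the timestamp:
 */
#if 0
	call->ping_serial = serial;
	smp_wmb();			/* Order serial before timestamp. */
	call->ping_time = ktime_get_real();
	set_bit(RXRPC_CALL_PINGING, &call->flags);
#endif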

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	bool wake = false;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

	if (call->tx_winsize != rwind) {
		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
					    ntohl(ackinfo->rwind), wake);
		call->tx_winsize = rwind;
	}

	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}

	if (wake)
		wake_up(&call->waitq);
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}
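
/* A worked example of the soft-ACK array semantics handled above: with
 * firstPacket = 5 and acks[] = { ACK, NACK, ACK }, packets 1-4 are
 * hard-ACK'd, 5 and 7 are soft-ACK'd, 6 is NAK'd and will be resent
 * immediately, and packets from 8 onwards are left to the resend timer.
 */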

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet has been received, but the ACK may yet be
 * withdrawn and the packet's retransmission requested, so it must be
 * retained.  A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header);
	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
			   first_soft_ack, ntohl(buf.ack.previousPacket),
			   summary.ack_reason, nr_acks);

	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(buf.info)) {
		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
			return rxrpc_proto_abort("XAI", call, 0);
		rxrpc_input_ackinfo(call, skb, &buf.info);
	}

	if (first_soft_ack == 0)
		return rxrpc_proto_abort("AK0", call, 0);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		return;
	}

	/* Discard any out-of-order or duplicate ACKs. */
	if (before_eq(sp->hdr.serial, call->acks_latest)) {
		_debug("discard ACK %d <= %d",
		       sp->hdr.serial, call->acks_latest);
		return;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort("AKW", call, 0);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort("AKN", call, 0);

	if (after(hard_ack, call->tx_hard_ack))
		rxrpc_rotate_tx_window(call, hard_ack, &summary);

	if (nr_acks > 0) {
		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
			return rxrpc_proto_abort("XSA", call, 0);
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_end_tx_phase(call, false, "ETA");
		return;
	}

	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	return rxrpc_congestion_management(call, skb, &summary, acked_serial);
}
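
/* For reference, a hedged sketch of the ACK packet layout parsed above,
 * with offsets taken from the end of the wire header; the trailing ackinfo
 * is optional and is only read if the packet is long enough to contain it:
 *
 *	struct rxrpc_ackpacket	ack;		// reason, firstPacket, nAcks, ...
 *	u8			acks[nAcks];	// one ACK/NACK byte per packet
 *	u8			pad[3];
 *	struct rxrpc_ackinfo	info;		// rxMTU, maxMTU, rwind, jumbo_max
 */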

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_end_tx_phase(call, false, "ETL");
}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				      abort_code, -ECONNABORTED))
		rxrpc_notify_socket(call);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		unsigned long now = jiffies, expect_rx_by;

		expect_rx_by = now + timo;
		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
		rxrpc_reduce_call_timer(call, expect_rx_by, now,
					rxrpc_timer_set_for_normal);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		break;
	}

	_leave("");
}

/*
 * Handle a new call on a channel implicitly completing the preceding call on
 * that channel.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
					  struct rxrpc_call *call)
{
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		break;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		break;
	}

	trace_rxrpc_improper_term(call);
	__rxrpc_disconnect_call(conn, call);
	rxrpc_notify_socket(call);
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_hdr"));
		return -EBADMSG;
	}

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch		= ntohl(whdr.epoch);
	sp->hdr.cid		= ntohl(whdr.cid);
	sp->hdr.callNumber	= ntohl(whdr.callNumber);
	sp->hdr.seq		= ntohl(whdr.seq);
	sp->hdr.serial		= ntohl(whdr.serial);
	sp->hdr.flags		= whdr.flags;
	sp->hdr.type		= whdr.type;
	sp->hdr.userStatus	= whdr.userStatus;
	sp->hdr.securityIndex	= whdr.securityIndex;
	sp->hdr._rsvd		= ntohs(whdr._rsvd);
	sp->hdr.serviceId	= ntohs(whdr.serviceId);
	return 0;
}

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *udp_sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = udp_sk->sk_user_data;
	struct sk_buff *skb;
	unsigned int channel;
	int ret, skew;

	_enter("%p", udp_sk);

	ASSERT(!irqs_disabled());

	skb = skb_recv_udp(udp_sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb, rxrpc_skb_rx_received);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The UDP protocol already released all skb resources;
	 * we are free to add our own data there.
	 */
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
			return;
		}
	}

	trace_rxrpc_rx_packet(sp);

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
			goto discard;
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
			goto discard;
		/* Fall through */

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
		    !rxrpc_validate_jumbo(skb))
			goto bad_message;
		break;

		/* Packet types 9-11 should just be ignored. */
	case RXRPC_PACKET_TYPE_PARAMS:
	case RXRPC_PACKET_TYPE_10:
	case RXRPC_PACKET_TYPE_11:
		goto discard;
	}

	rcu_read_lock();

	conn = rxrpc_find_connection_rcu(local, skb);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.serviceId != conn->service_id) {
			if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
			    conn->service_id != conn->params.service_id)
				goto reupgrade;
			conn->service_id = sp->hdr.serviceId;
		}

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		/* Note the serial number skew here.  The skew is passed on to
		 * the call-handling routines as a u16, so the magnitude of a
		 * negative skew is clamped to 65535.
		 */
		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
		if (skew >= 0) {
			if (skew > 0)
				conn->hi_serial = sp->hdr.serial;
		} else {
			skew = -skew;
			skew = min(skew, 65535);
		}

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];

		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard_unlock;

		if (sp->hdr.callNumber == chan->last_call) {
			if (chan->call ||
			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
				goto discard_unlock;

			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
				goto discard_unlock;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		call = rcu_dereference(chan->call);

		if (sp->hdr.callNumber > chan->call_id) {
			if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
				rcu_read_unlock();
				goto reject_packet;
			}
			if (call)
				rxrpc_input_implicit_end_call(conn, call);
			call = NULL;
		}

		if (call) {
			if (sp->hdr.serviceId != call->service_id)
				call->service_id = sp->hdr.serviceId;
			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
				call->rx_serial = sp->hdr.serial;
			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
		}
	} else {
		skew = 0;
		call = NULL;
	}

	if (!call || atomic_read(&call->usage) == 0) {
		if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED) ||
		    sp->hdr.callNumber == 0 ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message_unlock;
		if (sp->hdr.seq != 1)
			goto discard_unlock;
		call = rxrpc_new_incoming_call(local, conn, skb);
		if (!call) {
			rcu_read_unlock();
			goto reject_packet;
		}
		rxrpc_send_ping(call, skb, skew);
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_input_call_packet(call, skb, skew);

discard_unlock:
	rcu_read_unlock();
discard:
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return;

out_unlock:
	rcu_read_unlock();
	goto out;

wrong_security:
	rcu_read_unlock();
	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

reupgrade:
	rcu_read_unlock();
	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	goto protocol_error;

bad_message_unlock:
	rcu_read_unlock();
bad_message:
	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
}