/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}

/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
			    int skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	int ix;
	u8 annotation;

	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST)
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	wake_up(&call->waitq);

	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
				  rxrpc_propose_ack_client_tx_end);
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	} else {
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	}
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		call->resend_at = call->expire_at;
		call->ack_at = call->expire_at;
		spin_unlock_bh(&call->lock);
		rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
				ktime_get_real());
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_rotate_tx_window(call, top, &summary);
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_proto_abort("TXL", call, top);
		return false;
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}

/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * little headers between that indicate how to change the initial header for
 * each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len;
	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;

	do {
		nr_jumbo++;
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		offset += RXRPC_JUMBO_DATALEN;
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
	} while (flags & RXRPC_JUMBO_PACKET);

	sp->nr_jumbo = nr_jumbo;
	return true;

protocol_error:
	return false;
}

/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
				 u8 annotation, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
	if (annotation == 0)
		return;

	/* Skip jumbo subpackets that are duplicates.  When we've had three or
	 * more partially duplicate jumbo packets, we refuse to take any more
	 * jumbos for this call.
	 */
	if (!*_jumbo_bad) {
		call->nr_jumbo_bad++;
		*_jumbo_bad = true;
	}
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		return;

	call->ackr_prev_seq = seq;

	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		annotation = 1;
	}

next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top)
			return rxrpc_proto_abort("LSN", call, seq);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top))
			return rxrpc_proto_abort("LSA", call, seq);
	}

	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}

	/* Queue the packet.  We use a couple of memory barriers here as need
	 * to make sure that rx_top is perceived to be set after the buffer
	 * pointer and that the buffer pointer is set after the annotation and
	 * the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			return rxrpc_proto_abort("XJF", call, seq);
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		if (flags & RXRPC_JUMBO_PACKET)
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);

	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
		rxrpc_notify_socket(call);
	_leave(" [queued]");
}

/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	ktime_t sent_at;
	int ix;

	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		sp = rxrpc_skb(skb);
		if (sp->hdr.serial != orig_serial)
			continue;
		smp_rmb();
		sent_at = skb->tstamp;
		goto found;
	}
	return;

found:
	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
			   orig_serial, ack_serial, sent_at, resp_time);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

	ping_time = call->ping_time;
	smp_rmb();
	ping_serial = call->ping_serial;

	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
	    before(orig_serial, ping_serial))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
	call->tx_winsize = rwind;
	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend it immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header);
	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);

	_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       sp->hdr.serial,
	       ntohs(buf.ack.maxSkew),
	       first_soft_ack,
	       ntohl(buf.ack.previousPacket),
	       acked_serial,
	       rxrpc_ack_names[summary.ack_reason],
	       buf.ack.nAcks);

	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(buf.info)) {
		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
			return rxrpc_proto_abort("XAI", call, 0);
		rxrpc_input_ackinfo(call, skb, &buf.info);
	}

	if (first_soft_ack == 0)
		return rxrpc_proto_abort("AK0", call, 0);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		return;
	}

	/* Discard any out-of-order or duplicate ACKs.
	 */
	if (before_eq(sp->hdr.serial, call->acks_latest)) {
		_debug("discard ACK %d <= %d",
		       sp->hdr.serial, call->acks_latest);
		return;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort("AKW", call, 0);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort("AKN", call, 0);

	if (after(hard_ack, call->tx_hard_ack))
		rxrpc_rotate_tx_window(call, hard_ack, &summary);

	if (nr_acks > 0) {
		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
			return rxrpc_proto_abort("XSA", call, 0);
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_end_tx_phase(call, false, "ETA");
		return;
	}

	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	return rxrpc_congestion_management(call, skb, &summary, acked_serial);
}

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_end_tx_phase(call, false, "ETL");
}

/*
 * Process an ABORT packet.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				      abort_code, ECONNABORTED))
		rxrpc_notify_socket(call);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("%p,%p", call, skb);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
		break;
	}

	_leave("");
}

/*
 * Handle a new call on a channel implicitly completing the preceding call on
 * that channel.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
					  struct rxrpc_call *call)
{
	switch (call->state) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		break;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) {
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		break;
	}

	__rxrpc_disconnect_call(conn, call);
	rxrpc_notify_socket(call);
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
		return -EBADMSG;

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch = ntohl(whdr.epoch);
	sp->hdr.cid = ntohl(whdr.cid);
	sp->hdr.callNumber = ntohl(whdr.callNumber);
	sp->hdr.seq = ntohl(whdr.seq);
	sp->hdr.serial = ntohl(whdr.serial);
	sp->hdr.flags = whdr.flags;
	sp->hdr.type = whdr.type;
	sp->hdr.userStatus = whdr.userStatus;
	sp->hdr.securityIndex = whdr.securityIndex;
	sp->hdr._rsvd = ntohs(whdr._rsvd);
	sp->hdr.serviceId = ntohs(whdr.serviceId);
	return 0;
}

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *udp_sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = udp_sk->sk_user_data;
	struct sk_buff *skb;
	unsigned int channel;
	int ret, skew;

	_enter("%p", udp_sk);

	ASSERT(!irqs_disabled());

	skb = skb_recv_datagram(udp_sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb, rxrpc_skb_rx_received);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own data there.
	 */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
			return;
		}
	}

	trace_rxrpc_rx_packet(sp);

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
			goto discard;

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
		    !rxrpc_validate_jumbo(skb))
			goto bad_message;
		break;
	}

	rcu_read_lock();

	conn = rxrpc_find_connection_rcu(local, skb);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		/* Note the serial number skew here */
		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
		if (skew >= 0) {
			if (skew > 0)
				conn->hi_serial = sp->hdr.serial;
		} else {
			skew = -skew;
			skew = min(skew, 65535);
		}

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];

		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard_unlock;

		if (sp->hdr.callNumber == chan->last_call) {
			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
				goto discard_unlock;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		call = rcu_dereference(chan->call);

		if (sp->hdr.callNumber > chan->call_id) {
			if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
				rcu_read_unlock();
				goto reject_packet;
			}
			if (call)
				rxrpc_input_implicit_end_call(conn, call);
			call = NULL;
		}
	} else {
		skew = 0;
		call = NULL;
	}

	if (!call || atomic_read(&call->usage) == 0) {
		if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED) ||
		    sp->hdr.callNumber == 0 ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message_unlock;
		if (sp->hdr.seq != 1)
			goto discard_unlock;
		call = rxrpc_new_incoming_call(local, conn, skb);
		if (!call) {
			rcu_read_unlock();
			goto reject_packet;
		}
		rxrpc_send_ping(call, skb, skew);
	}

	rxrpc_input_call_packet(call, skb, skew);
	goto discard_unlock;

discard_unlock:
	rcu_read_unlock();
discard:
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return;

out_unlock:
	rcu_read_unlock();
	goto out;

wrong_security:
	rcu_read_unlock();
	trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

bad_message_unlock:
	rcu_read_unlock();
bad_message:
	trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
}