/* RxRPC packet reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

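	/* unless the caller forces the packet through, give the socket's
	 * attached filter a chance to reject it before it is queued */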
	if (!force) {
		/* cast skb->rcvbuf to unsigned...  It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
//		ret = -ENOBUFS;
//		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
//		    (unsigned int) sk->sk_rcvbuf)
//			goto out;

		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {
			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk);
		}
		skb = NULL;
	} else {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}
	ret = 0;

out:
	rxrpc_free_skb(skb);

	_leave(" = %d", ret);
	return ret;
}

/*
 * process a DATA packet, posting the packet to the appropriate queue
 * - eats the packet if successful
 */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
				   struct sk_buff *skb, u32 seq)
{
	struct rxrpc_skb_priv *sp;
	bool terminal;
	int ret, ackbit, ack;
	u32 serial;
	u8 flags;

	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, NULL);
	flags = sp->hdr.flags;
	serial = sp->hdr.serial;

	spin_lock(&call->lock);

	if (call->state > RXRPC_CALL_COMPLETE)
		goto discard;

	ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
	ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
	ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

	if (seq < call->rx_data_post) {
		_debug("dup #%u [-%u]", seq, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		ret = -ENOBUFS;
		goto discard_and_ack;
	}

	/* we may already have the packet in the out of sequence queue */
	ackbit = seq - (call->rx_data_eaten + 1);
	ASSERTCMP(ackbit, >=, 0);
	if (__test_and_set_bit(ackbit, call->ackr_window)) {
		_debug("dup oos #%u [%u,%u]",
		       seq, call->rx_data_eaten, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	if (seq >= call->ackr_win_top) {
		_debug("exceed #%u [%u]", seq, call->ackr_win_top);
		__clear_bit(ackbit, call->ackr_window);
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		goto discard_and_ack;
	}

	if (seq == call->rx_data_expect) {
		clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
		call->rx_data_expect++;
	} else if (seq > call->rx_data_expect) {
		_debug("oos #%u [%u]", seq, call->rx_data_expect);
		call->rx_data_expect = seq + 1;
		if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			goto enqueue_and_ack;
		}
		goto enqueue_packet;
	}

	if (seq != call->rx_data_post) {
		_debug("ahead #%u [%u]", seq, call->rx_data_post);
		goto enqueue_packet;
	}

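	/* in-sequence data arriving after the packet flagged as the last one
	 * has already been seen indicates a misbehaving peer, so treat it as
	 * a protocol error */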
	if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
		goto protocol_error;

	/* if the packet needs security things doing to it, then it goes down
	 * the slow path */
	if (call->conn->security_ix)
		goto enqueue_packet;

	sp->call = call;
	rxrpc_get_call(call);
	atomic_inc(&call->skb_count);
	terminal = ((flags & RXRPC_LAST_PACKET) &&
		    !(flags & RXRPC_CLIENT_INITIATED));
	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOBUFS) {
			__clear_bit(ackbit, call->ackr_window);
			ack = RXRPC_ACK_NOSPACE;
			goto discard_and_ack;
		}
		goto out;
	}

	skb = NULL;
	sp = NULL;

	_debug("post #%u", seq);
	ASSERTCMP(call->rx_data_post, ==, seq);
	call->rx_data_post++;

	if (flags & RXRPC_LAST_PACKET)
		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

	/* if we've reached an out of sequence packet then we need to drain
	 * that queue into the socket Rx queue now */
	if (call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		read_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
			rxrpc_queue_call(call);
		read_unlock(&call->state_lock);
	}

	spin_unlock(&call->lock);
	atomic_inc(&call->ackr_not_idle);
	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
	_leave(" = 0 [posted]");
	return 0;

protocol_error:
	ret = -EBADMSG;
out:
	spin_unlock(&call->lock);
	_leave(" = %d", ret);
	return ret;

discard_and_ack:
	_debug("discard and ACK packet %p", skb);
	__rxrpc_propose_ACK(call, ack, serial, true);
discard:
	spin_unlock(&call->lock);
	rxrpc_free_skb(skb);
	_leave(" = 0 [discarded]");
	return 0;

enqueue_and_ack:
	__rxrpc_propose_ACK(call, ack, serial, true);
enqueue_packet:
	_net("defer skb %p", skb);
	spin_unlock(&call->lock);
	skb_queue_tail(&call->rx_queue, skb);
	atomic_inc(&call->ackr_not_idle);
	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD)
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
	_leave(" = 0 [queued]");
	return 0;
}

/*
 * assume an implicit ACKALL of the transmission phase of a client socket upon
 * reception of the first reply packet
 */
static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
{
	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		call->acks_latest = serial;

		_debug("implicit ACKALL %%%u", call->acks_latest);
		set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
		write_unlock_bh(&call->state_lock);

		if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
			clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
			clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
			clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		}
		break;

	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}

/*
 * post an incoming packet to the nominated call to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 hi_serial, abort_code;

	_enter("%p,%p", call, skb);

	ASSERT(!irqs_disabled());

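	/* compile-time fault injection, normally disabled: with the block
	 * below enabled, every third DATA packet is dropped */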
#if 0 // INJECT RX ERROR
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		static int skip = 0;
		if (++skip == 3) {
			printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
			skip = 0;
			goto free_packet;
		}
	}
#endif

	/* track the latest serial number on this connection for ACK packet
	 * information */
	hi_serial = atomic_read(&call->conn->hi_serial);
	while (sp->hdr.serial > hi_serial)
		hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
					   sp->hdr.serial);

	/* request ACK generation for any ACK or DATA packet that requests
	 * it */
	if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		_proto("ACK Requested on %%%u", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_debug("abort");

		if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
			goto protocol_error;

		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

		write_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_REMOTELY_ABORTED;
			call->remote_abort = abort_code;
			set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		goto free_packet_unlock;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		if (rxrpc_conn_is_service(call->conn))
			goto protocol_error;

		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_SERVER_BUSY;
			set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
			rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_SERVER_BUSY:
			goto free_packet_unlock;
		default:
			goto protocol_error_locked;
		}

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
		goto protocol_error;

	case RXRPC_PACKET_TYPE_DATA:
		_proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		if (sp->hdr.seq == 0)
			goto protocol_error;

		call->ackr_prev_seq = sp->hdr.seq;

		/* received data implicitly ACKs all of the request packets we
		 * sent when we're acting as a client */
		if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
			rxrpc_assume_implicit_ackall(call, sp->hdr.serial);

		switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
		case 0:
			skb = NULL;
			goto done;

		default:
			BUG();

			/* data packet received beyond the last packet */
		case -EBADMSG:
			goto protocol_error;
		}

	case RXRPC_PACKET_TYPE_ACKALL:
	case RXRPC_PACKET_TYPE_ACK:
		/* ACK processing is done in process context */
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD) {
			skb_queue_tail(&call->rx_queue, skb);
			rxrpc_queue_call(call);
			skb = NULL;
		}
		read_unlock_bh(&call->state_lock);
		goto free_packet;
	}

protocol_error:
	_debug("protocol error");
	write_lock_bh(&call->state_lock);
protocol_error_locked:
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
free_packet_unlock:
	write_unlock_bh(&call->state_lock);
free_packet:
	rxrpc_free_skb(skb);
done:
	_leave("");
}

/*
 * split up a jumbo data packet
 */
static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
				       struct sk_buff *jumbo)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *part;

	_enter(",{%u,%u}", jumbo->data_len, jumbo->len);

	sp = rxrpc_skb(jumbo);

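	/* repeatedly clone off the leading RXRPC_JUMBO_DATALEN-sized subpacket
	 * and hand it to the fast path as an ordinary DATA packet; the
	 * remaining tail has its sequence and serial numbers advanced as each
	 * embedded jumbo header is consumed */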
	do {
		sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;

		/* make a clone to represent the first subpacket in what's left
		 * of the jumbo packet */
		part = skb_clone(jumbo, GFP_ATOMIC);
		if (!part) {
			/* simply ditch the tail in the event of ENOMEM */
			pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
			break;
		}
		rxrpc_new_skb(part);

		pskb_trim(part, RXRPC_JUMBO_DATALEN);

		if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
			goto protocol_error;

		if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;
		if (!pskb_pull(jumbo, sizeof(jhdr)))
			BUG();

		sp->hdr.seq += 1;
		sp->hdr.serial += 1;
		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = ntohs(jhdr._rsvd);

		_proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);

		rxrpc_fast_process_packet(call, part);
		part = NULL;

	} while (sp->hdr.flags & RXRPC_JUMBO_PACKET);

	rxrpc_fast_process_packet(call, jumbo);
	_leave("");
	return;

protocol_error:
	_debug("protocol error");
	rxrpc_free_skb(part);
	rxrpc_free_skb(jumbo);
	write_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock_bh(&call->state_lock);
	_leave("");
}

/*
 * post an incoming packet to the appropriate call/socket to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
				      struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;

	_enter("%p,%p", call, skb);

	sp = rxrpc_skb(skb);

	_debug("extant call [%d]", call->state);

	read_lock(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_LOCALLY_ABORTED:
		if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
			rxrpc_queue_call(call);
			goto free_unlock;
		}
		/* fall through */
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_NETWORK_ERROR:
	case RXRPC_CALL_DEAD:
		goto dead_call;
	case RXRPC_CALL_COMPLETE:
	case RXRPC_CALL_CLIENT_FINAL_ACK:
		/* complete server call */
		if (rxrpc_conn_is_service(call->conn))
			goto dead_call;
		/* resend last packet of a completed call */
		_debug("final ack again");
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		rxrpc_queue_call(call);
		goto free_unlock;
	default:
		break;
	}

	read_unlock(&call->state_lock);
	rxrpc_get_call(call);

	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_process_jumbo_packet(call, skb);
	else
		rxrpc_fast_process_packet(call, skb);

	rxrpc_put_call(call);
	goto done;

dead_call:
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		skb->priority = RX_CALL_DEAD;
		rxrpc_reject_packet(call->conn->params.local, skb);
		goto unlock;
	}
free_unlock:
	rxrpc_free_skb(skb);
unlock:
	read_unlock(&call->state_lock);
done:
	_leave("");
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses and some aborts
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
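	/* queue the packet on the connection and kick its processor so that
	 * the packet is dealt with in process context rather than here */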
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
		return -EBADMSG;
	if (!pskb_pull(skb, sizeof(whdr)))
		BUG();

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch = ntohl(whdr.epoch);
	sp->hdr.cid = ntohl(whdr.cid);
	sp->hdr.callNumber = ntohl(whdr.callNumber);
	sp->hdr.seq = ntohl(whdr.seq);
	sp->hdr.serial = ntohl(whdr.serial);
	sp->hdr.flags = whdr.flags;
	sp->hdr.type = whdr.type;
	sp->hdr.userStatus = whdr.userStatus;
	sp->hdr.securityIndex = whdr.securityIndex;
	sp->hdr._rsvd = ntohs(whdr._rsvd);
	sp->hdr.serviceId = ntohs(whdr.serviceId);
	return 0;
}

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = sk->sk_user_data;
	struct sk_buff *skb;
	int ret;

	_enter("%p", sk);

	ASSERT(!irqs_disabled());

	skb = skb_recv_datagram(sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own data there.
	 */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);

	_net("Rx UDP packet from %08x:%04hu",
	     ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
		rxrpc_post_packet_to_local(local, skb);
		goto out;
	}

	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
		goto bad_message;

	rcu_read_lock();

	conn = rxrpc_find_connection_rcu(local, skb);
	if (!conn)
		goto cant_route_call;

	if (sp->hdr.callNumber == 0) {
		/* Connection-level packet */
		_debug("CONN %p {%d}", conn, conn->debug_id);
		rxrpc_post_packet_to_conn(conn, skb);
	} else {
		/* Call-bound packets are routed by connection channel. */
		unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		struct rxrpc_channel *chan = &conn->channels[channel];
		struct rxrpc_call *call = rcu_dereference(chan->call);

		if (!call || atomic_read(&call->usage) == 0)
			goto cant_route_call;

		rxrpc_post_packet_to_call(call, skb);
	}

	rcu_read_unlock();
out:
	return;

cant_route_call:
	rcu_read_unlock();

	_debug("can't route call");
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
	    sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		if (sp->hdr.seq == 1) {
			_debug("first packet");
			skb_queue_tail(&local->accept_queue, skb);
			rxrpc_queue_work(&local->processor);
			_leave(" [incoming]");
			return;
		}
		skb->priority = RX_INVALID_OPERATION;
	} else {
		skb->priority = RX_CALL_DEAD;
	}

	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		_debug("reject type %d", sp->hdr.type);
		rxrpc_reject_packet(local, skb);
	} else {
		rxrpc_free_skb(skb);
	}
	_leave(" [no call]");
	return;

bad_message:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
}