// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
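
/*
 * The lookup above takes a ref (rxrpc_call_got) under rx->call_lock, and
 * that ref is what keeps the call pinned once the lock is dropped, so a
 * caller must balance it with rxrpc_put_call() when done.  A minimal
 * usage sketch (error handling elided; not a verbatim caller from this
 * file):
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (call) {
 *		... use the call ...
 *		rxrpc_put_call(call, rxrpc_call_put);
 *	}
 */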

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->input_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

	call->rxnet = rxnet;
	atomic_inc(&rxnet->nr_calls);
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}
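
/*
 * Note that rxrpc_start_call_timer() only parks every per-event timeout
 * at now + MAX_JIFFY_OFFSET (effectively "never") and records the base
 * expiry; it does not arm call->timer itself.  Each timeout is pulled
 * forward later, when the corresponding event is actually scheduled.  A
 * hedged sketch of how one timeout would be brought forward (the helper
 * name follows the pattern used elsewhere in rxrpc; treat it as an
 * assumption here):
 *
 *	unsigned long ack_at = jiffies + expiry;
 *
 *	WRITE_ONCE(call->ack_at, ack_at);
 *	rxrpc_reduce_call_timer(call, ack_at, jiffies,
 *				rxrpc_timer_set_for_ack);
 */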

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	if (p->intr)
		__set_bit(RXRPC_CALL_IS_INTR, &call->flags);
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	ret = -EEXIST;

error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
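
/*
 * For reference, the locking sequence in rxrpc_new_client_call() above,
 * as promised by its __releases/__acquires annotations (a sketch, not
 * additional code):
 *
 *	lock_sock(&rx->sk)		held by the caller on entry
 *	mutex_lock(&call->user_mutex)	taken before the call is published
 *	write_lock(&rx->call_lock)	insert into rx->calls
 *	write_unlock(&rx->call_lock)
 *	release_sock(&rx->sk)		dropped before rxrpc_connect_call()
 *
 * On return the caller is left holding call->user_mutex and must drop it
 * when done with the call.
 */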

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}
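
/*
 * All of the queue/see/get helpers above feed trace_rxrpc_call() with
 * the caller's return address, so refcount imbalances can be chased
 * from userspace via the rxrpc_call tracepoint.  A hedged example,
 * assuming tracefs is mounted in the usual place:
 *
 *	# echo 1 > /sys/kernel/tracing/events/rxrpc/rxrpc_call/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 */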

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}
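
/*
 * Dropping the last ref does not free the call immediately:
 * rxrpc_cleanup_call() hands the final freeing off to call_rcu() (see
 * rxrpc_rcu_destroy_call() below).  This is needed because pointers such
 * as conn->channels[chan].call and call->socket are published with
 * rcu_assign_pointer() and may still be dereferenced by RCU readers
 * until a grace period has elapsed.
 */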

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		write_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			write_unlock(&rxnet->call_lock);
			cond_resched();
			write_lock(&rxnet->call_lock);
		}

		write_unlock(&rxnet->call_lock);
	}

	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}