/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
	[rxrpc_call_new_client]		= "NWc",
	[rxrpc_call_new_service]	= "NWs",
	[rxrpc_call_queued]		= "QUE",
	[rxrpc_call_queued_ref]		= "QUR",
	[rxrpc_call_seen]		= "SEE",
	[rxrpc_call_got]		= "GOT",
	[rxrpc_call_got_userid]		= "Gus",
	[rxrpc_call_got_kernel]		= "Gke",
	[rxrpc_call_put]		= "PUT",
	[rxrpc_call_put_userid]		= "Pus",
	[rxrpc_call_put_kernel]		= "Pke",
	[rxrpc_call_put_noqueue]	= "PNQ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_call_timer_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *)_call;

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE)
		rxrpc_queue_call(call);
}

/*
 * Find an extant call by user ID.
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
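/*
 * Illustrative sketch, not part of the original file: a caller of
 * rxrpc_find_call_by_user_ID() gets the call back with a ref already taken
 * (rxrpc_call_got above) and must balance it with rxrpc_put_call() when
 * done.  The function name and error code below are assumptions made for
 * illustration only.
 */
#if 0
static int rxrpc_example_use_call(struct rxrpc_sock *rx, unsigned long id)
{
	struct rxrpc_call *call;

	call = rxrpc_find_call_by_user_ID(rx, id);	/* takes a ref */
	if (!call)
		return -EBADSLT;

	/* ... operate on the call ... */

	rxrpc_put_call(call, rxrpc_call_put);		/* balance the ref */
	return 0;
}
#endif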
/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	setup_timer(&call->timer, rxrpc_call_timer_expired,
		    (unsigned long)call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long expire_at;

	expire_at = jiffies + rxrpc_max_call_lifetime;
	call->expire_at = expire_at;
	call->ack_at = expire_at;
	call->resend_at = expire_at;
	call->timer.expires = expire_at;
	add_timer(&call->timer);
}
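/*
 * Sketch, not part of the original file: all three deadlines start out
 * coincident at the maximum call lifetime; the event-processing code is
 * then expected to pull ack_at/resend_at forward and re-arm the timer.
 * The fragment below (imagined inside such a function; 'expiry' is a
 * hypothetical delay) only illustrates the jiffies arithmetic involved.
 */
#if 0
	unsigned long ack_at = jiffies + expiry;

	if (time_before(ack_at, call->ack_at)) {
		call->ack_at = ack_at;		/* bring the ack deadline forward */
		if (time_before(ack_at, call->timer.expires))
			mod_timer(&call->timer, ack_at);
	}
#endif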
/*
 * Set up a call for the given parameters.
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rcu_assign_pointer(call->socket, rx);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call, rxrpc_call_put_userid);

	write_lock(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock(&rxrpc_call_lock);

error_out:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the tree after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	ret = -EEXIST;
	goto error_out;
}
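/*
 * Sketch, not part of the original file: callers get back either a valid
 * call pointer (with refs held for the caller and for the user-ID tree) or
 * an ERR_PTR(), so they must test with IS_ERR() rather than for NULL.  The
 * surrounding function is hypothetical.
 */
#if 0
static long rxrpc_example_begin(struct rxrpc_sock *rx,
				struct rxrpc_conn_parameters *cp,
				struct sockaddr_rxrpc *srx,
				unsigned long user_call_ID)
{
	struct rxrpc_call *call;

	call = rxrpc_new_client_call(rx, cp, srx, user_call_ID, GFP_KERNEL);
	if (IS_ERR(call))
		return PTR_ERR(call);	/* e.g. -EEXIST for a reused ID */

	/* ... send the request, then drop the caller's ref ... */
	rxrpc_put_call(call, rxrpc_call_put);
	return 0;
}
#endif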
/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state = RXRPC_CALL_SERVER_SECURING;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with
	 * the call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);

	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);

	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
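/*
 * Sketch, not part of the original file, contrasting the two queueing
 * variants above: rxrpc_queue_call() takes its own ref (and refuses if the
 * usage count has already dropped to zero), whereas __rxrpc_queue_call()
 * donates a ref the caller must already hold.  The fragment is imagined
 * inside some function that holds a call pointer.
 */
#if 0
	if (!rxrpc_queue_call(call))		/* takes a new ref itself */
		return;				/* call already dying; nothing queued */

	rxrpc_get_call(call, rxrpc_call_got);	/* take a ref explicitly... */
	__rxrpc_queue_call(call);		/* ...and donate it to the work item */
#endif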
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i]);
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * Drop a ref on a call, destroying it when the last ref goes away.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		write_lock(&rxrpc_call_lock);
		list_del_init(&call->link);
		write_unlock(&rxrpc_call_lock);

		rxrpc_cleanup_call(call);
	}
}
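/*
 * Sketch, not part of the original file: because the final free (below) is
 * deferred through call_rcu(), an RCU reader that finds a call via, say, a
 * connection channel pointer can safely attempt to take a ref inside the
 * read-side section.  rxrpc_get_call_maybe() is a hypothetical helper
 * (roughly atomic_inc_not_zero() on call->usage), used here only to
 * illustrate why the memory must survive a grace period.
 */
#if 0
	rcu_read_lock();
	call = rcu_dereference(conn->channels[chan].call);
	if (call && !rxrpc_get_call_maybe(call))
		call = NULL;		/* lost the race with the last put */
	rcu_read_unlock();
#endif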
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i]);

	rxrpc_free_skb(call->tx_pending);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxrpc_calls))
		return;

	write_lock(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		write_unlock(&rxrpc_call_lock);
		cond_resched();
		write_lock(&rxrpc_call_lock);
	}

	write_unlock(&rxrpc_call_lock);
}
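/*
 * Sketch, not part of the original file: rxrpc_destroy_all_calls() is
 * expected to run from the module exit path after sockets and connections
 * have been torn down, as a last-ditch check that no call leaked.  The
 * exit function below is hypothetical; the real one lives elsewhere in the
 * module (af_rxrpc.c).
 */
#if 0
static void __exit rxrpc_example_exit(void)
{
	/* ... tear down sockets, connections and peers first ... */
	rxrpc_destroy_all_calls();
	/* ... then release the call slab cache itself ... */
	kmem_cache_destroy(rxrpc_call_jar);
}
#endif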