/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED] = "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
	[RXRPC_CALL_COMPLETE] = "Complete",
	[RXRPC_CALL_DEAD] = "Dead    ",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED] = "Complete",
	[RXRPC_CALL_SERVER_BUSY] = "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR] = "LocError",
	[RXRPC_CALL_NETWORK_ERROR] = "NetError",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call with the given user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->peer = conn->params.peer;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* Fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id,
	     call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(atomic_read(&call->skb_count) != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}