/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call by user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

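	/* Each call carries four timers: lifetimer bounds the overall
	 * lifetime of the call, deadspan reaps the call once it has been
	 * released, ack_timer triggers deferred ACK transmission and
	 * resend_timer drives retransmission.  With the old setup_timer()
	 * API used here, each handler receives the call pointer cast to
	 * unsigned long as its argument.
	 */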
	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	call->local = rx->local;
	call->service_id = srx->srx_service;
	call->in_clientflag = 0;

	_leave(" = %p", call);
	return call;
}
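
/* For orientation, a successful client call is expected to advance through
 * the states listed in rxrpc_call_states[] above roughly as:
 *
 *	CLIENT_AWAIT_CONN -> CLIENT_SEND_REQUEST -> CLIENT_AWAIT_REPLY
 *		-> CLIENT_RECV_REPLY -> [CLIENT_FINAL_ACK] -> COMPLETE
 *
 * with the abort, busy, network-error and dead states reachable from most
 * points in between.
 */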

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}
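
/* Note the ordering in rxrpc_new_client_call() above: the call is published
 * in the socket's user_call_ID tree *before* rxrpc_begin_client_call() makes
 * it operational, so that the ID is claimed under rx->call_lock and a
 * concurrent attempt to reuse it is caught (-EEXIST) before the potentially
 * slow connection setup runs.  The price is the hand-rolled unwinding of the
 * tree link, the global list and both references on the error path.
 */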

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->channel = chan;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}
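
	/* Rx reuses a connection's four channels for successive calls, and
	 * the call number on a channel only ever increases.  A call ID at or
	 * below the channel's call_counter therefore refers to a call that
	 * has already terminated.
	 */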

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->local = conn->params.local;
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = RXRPC_CLIENT_INITIATED;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
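
/* The functions below implement call teardown.  The sequence is:
 * rxrpc_release_call() detaches the call from its socket and connection and
 * arms the deadspan timer; rxrpc_dead_call_expired() then moves the call to
 * RXRPC_CALL_DEAD and drops the socket's ref; the final ref drop in
 * __rxrpc_put_call() queues rxrpc_destroy_call(), which unlinks the call and
 * frees it after an RCU grace period.
 */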

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(atomic_read(&call->skb_count) != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
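
/* Freeing is deferred by call_rcu() on the assumption that softirq-side
 * packet processing may still be dereferencing conn->channels[chan].call
 * under the RCU read lock (it is assigned with rcu_assign_pointer() in
 * rxrpc_incoming_call() above), so the memory must persist until a grace
 * period has elapsed.
 */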

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records on module unload rather than
 * waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
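
/* The timer expiry handlers below run in softirq context.  They share a
 * pattern: do nothing if the call is already complete, otherwise set the
 * relevant event bit and queue the call's processor work item, which does
 * the real work in process context.
 */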

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}