/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}
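	/* Each call carries four timers, all taking the call pointer as
	 * their callback argument: overall call lifetime, the dead-call
	 * reap delay, delayed ACK generation and packet resend.
	 */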
	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	call->local = rx->local;
	call->service_id = srx->srx_service;
	call->in_clientflag = 0;

	_leave(" = %p", call);
	return call;
}
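/* Note: a client call is set up in two stages: rxrpc_alloc_client_call()
 * (above) allocates the call and takes a ref on the socket, and
 * rxrpc_begin_client_call() (below) binds it to a connection and starts the
 * lifetime timer.
 */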
/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

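	/* Unwind: drop the ref held by the socket's call tree, unlink the
	 * call from the global list, then mark it dead so that the final
	 * ref put schedules destruction.
	 */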
error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->channel = chan;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

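	/* An rxrpc connection carries up to four calls, one per channel; a
	 * channel can only be claimed for a new call once any previous call
	 * bound to it has completed.
	 */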
	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* Fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->local = conn->params.local;
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = RXRPC_CLIENT_INITIATED;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
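/* Note: releasing a call detaches it from the socket but does not free it
 * immediately: the deadspan timer later marks it RXRPC_CALL_DEAD and drops
 * the ref it inherited from the socket; the call is destroyed when the last
 * ref is put.
 */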
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
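/* Teardown proceeds in stages: the last ref put queues call->destroyer,
 * rxrpc_destroy_call() unlinks the call and calls rxrpc_cleanup_call(),
 * which releases the Tx/Rx buffers and finally frees the call via RCU.
 */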
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
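/* The timer expiry handlers below run in softirq context; they do little
 * more than set an event bit and queue the call's work item, leaving the
 * actual work to rxrpc_process_call().
 */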
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}