/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}
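
	/* The backlog rings filled above are drained by
	 * rxrpc_alloc_incoming_call() in softirq context.  Each head index is
	 * advanced with smp_store_release() only after the new slot has been
	 * filled, pairing with the consumer's smp_load_acquire() of the head
	 * so that it never observes a partially initialised entry.
	 */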

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}
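
/*
 * For reference, a userspace server typically reaches the preallocation path
 * above via listen() on an AF_RXRPC socket.  A minimal sketch, with the
 * service ID and port chosen purely as examples:
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family		= AF_RXRPC,
 *		.srx_service		= 52,		// example service ID
 *		.transport_type		= SOCK_DGRAM,
 *		.transport_len		= sizeof(srx.transport.sin),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port	= htons(7000),	// example port
 *	};
 *	int server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *	bind(server, (struct sockaddr *)&srx, sizeof(srx));
 *	listen(server, 100);	// rxrpc_listen() charges the backlog
 */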

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point.  Use a separate iterator here so
	 * that the call we just dequeued isn't overwritten.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
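
/*
 * For reference, userspace drives rxrpc_accept_call() above by replying to a
 * new-call notification with a sendmsg() carrying an RXRPC_ACCEPT control
 * message plus the RXRPC_USER_CALL_ID to assign; RXRPC_REJECT drives
 * rxrpc_reject_call() below.  A minimal sketch, with error handling omitted:
 *
 *	unsigned long id = next_call_id++;	// caller-chosen tag
 *	char control[CMSG_SPACE(sizeof(id)) + CMSG_SPACE(0)];
 *	struct msghdr msg = {
 *		.msg_control	= control,
 *		.msg_controllen	= sizeof(control),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(id));
 *	memcpy(CMSG_DATA(cmsg), &id, sizeof(id));
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_ACCEPT;
 *	cmsg->cmsg_len	 = CMSG_LEN(0);
 *
 *	sendmsg(server_fd, &msg, 0);
 */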

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
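
/*
 * Illustrative sketch (not built): roughly how a kernel service would use
 * rxrpc_kernel_charge_accept() to keep its accept backlog charged after
 * kernel_listen() has been called on the socket (which allocates the
 * backlog).  The example_*() names are hypothetical stand-ins for the
 * service's own call bookkeeping; the pattern is loosely modelled on what an
 * in-kernel user such as kAFS does.
 */
#if 0
struct example_call {
	unsigned int	debug_id;
	/* ... the service's per-call state ... */
};

static void example_attach_call(struct rxrpc_call *rxcall,
				unsigned long user_call_ID)
{
	/* Called under the rxrpc socket's call_lock from
	 * rxrpc_service_prealloc_one(); link the service's call record
	 * (identified by user_call_ID) to rxcall here.
	 */
}

static void example_charge_backlog(struct socket *sock)
{
	struct example_call *call;

	/* Keep charging until the backlog is full (-ENOBUFS) or allocation
	 * fails (-ENOMEM).
	 */
	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (!call)
			break;
		call->debug_id = atomic_inc_return(&rxrpc_debug_id);
		if (rxrpc_kernel_charge_accept(sock, NULL /* or a notify_rx */,
					       example_attach_call,
					       (unsigned long)call, GFP_KERNEL,
					       call->debug_id) < 0) {
			kfree(call);
			break;
		}
	}
}
#endif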