// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
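	 * For a kernel service, user_attach_call is supplied and the
	 * preassigned user_call_ID is inserted into the socket's call tree
	 * here; for a userspace service both are NULL and the ID is assigned
	 * later, at accept time.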
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
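	 * (rxrpc_new_incoming_call() consumes ring entries while holding
	 * rx->incoming_lock, so cycling that lock below waits for any such
	 * consumption that is already in progress to finish.)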
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #peers >= #conns >= #calls must hold true for the backlog rings
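	 * because an incoming call may reuse an existing conn and peer, so
	 * the call ring drains at least as fast as the conn and peer rings;
	 * the assertions below check this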
	 */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
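 * - the call is returned with the user access mutex held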
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point.
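	 * The insertion point found during the earlier duplicate check is
	 * stale because rx->call_lock was dropped while taking the user
	 * mutex, so the tree has to be walked again.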
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
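
/*
 * Illustrative usage sketch (not part of this file): a kernel service
 * typically keeps its accept backlog charged by preallocating one of its own
 * call records per slot and handing its address over as the user call ID.
 * The names my_call, my_alloc_call(), my_free_call(), my_attach_call() and
 * my_notify_rx() below are hypothetical placeholders for the service's own
 * helpers.
 *
 *	struct my_call *slot;
 *
 *	for (;;) {
 *		slot = my_alloc_call(GFP_KERNEL);
 *		if (!slot)
 *			break;
 *		if (rxrpc_kernel_charge_accept(sock, my_notify_rx,
 *					       my_attach_call,
 *					       (unsigned long)slot, GFP_KERNEL,
 *					       atomic_inc_return(&rxrpc_debug_id)) < 0) {
 *			my_free_call(slot);
 *			break;
 *		}
 *	}
 */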