// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

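/*
 * Preallocation is managed through three free-running rings of
 * RXRPC_BACKLOG_MAX slots each, holding spare peers, connections and calls.
 * The rings are charged from process context (listen(), call accept/reject
 * or a kernel service calling rxrpc_kernel_charge_accept()), which publishes
 * each new entry by advancing the head index with smp_store_release(); they
 * are drained in softirq context under rx->incoming_lock, which advances the
 * tail index.  The consumer's smp_load_acquire() on a head index pairs with
 * the producer's store-release so that a slot's contents are always visible
 * before the index that publishes them.
 */
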
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
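	/* The ring indices are free-running; with a power-of-two ring size,
	 * CIRC_CNT(head, tail, size) is just (head - tail) & (size - 1),
	 * i.e. the number of preallocated entries not yet consumed.
	 */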
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
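	/* At this point a kernel-service call holds three refs: the backlog
	 * ring's, the kernel service's and the user-ID tree's.  A userspace
	 * prealloc call only holds the backlog ring's ref.
	 */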

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a call is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

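	/* A kernel service that supplies a discard function charges its own
	 * preallocation, attaching a user ID to each call, via
	 * rxrpc_kernel_charge_accept(), so don't top up anonymously here.
	 */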
	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
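	/* The lock-and-unlock above is purely a barrier: once we own the
	 * lock, any rxrpc_new_incoming_call() that was drawing on these
	 * rings has finished with them.
	 */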

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
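		/* The call was never attached to a socket; do that now so
		 * that completion and release find the right one.
		 */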
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);
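	/* The acquire loads on the head indices above pair with the
	 * store-releases in rxrpc_service_prealloc_one().  The rings must
	 * hold at least as many conns as calls and as many peers as conns,
	 * since a single incoming call may need to draw a fresh conn and
	 * peer from the pools.
	 */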

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);
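	/* A kernel service is told of the new call directly; for userspace,
	 * the call is charged to the accept backlog and will be reported
	 * through recvmsg().
	 */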

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
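		/* The first call on a new connection must kick off security
		 * negotiation (e.g. an rxkad CHALLENGE) from the connection's
		 * work item before the call can proceed.
		 */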
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
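/*
 * For reference, a rough sketch of how userspace drives this, based on
 * Documentation/networking/rxrpc.txt (error handling elided; the tag value
 * is a hypothetical example): recvmsg() reports the new call with an
 * RXRPC_NEW_CALL control message, and the call is then accepted by sending
 * a zero-length RXRPC_ACCEPT cmsg together with an RXRPC_USER_CALL_ID cmsg
 * carrying the tag to assign:
 *
 *	unsigned long id = 0x1234;
 *	char buf[CMSG_SPACE(sizeof(id)) + CMSG_SPACE(0)];
 *	struct msghdr msg = {
 *		.msg_control	= buf,
 *		.msg_controllen	= sizeof(buf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level	= SOL_RXRPC;
 *	cm->cmsg_type	= RXRPC_USER_CALL_ID;
 *	cm->cmsg_len	= CMSG_LEN(sizeof(id));
 *	memcpy(CMSG_DATA(cm), &id, sizeof(id));
 *
 *	cm = CMSG_NXTHDR(&msg, cm);
 *	cm->cmsg_level	= SOL_RXRPC;
 *	cm->cmsg_type	= RXRPC_ACCEPT;
 *	cm->cmsg_len	= CMSG_LEN(0);
 *
 *	sendmsg(server_fd, &msg, 0);
 */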
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		_leave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		_leave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
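
/*
 * For reference, a rough sketch of how a kernel service (e.g. fs/afs) keeps
 * the backlog charged.  my_call, my_notify_rx and my_attach_call are
 * hypothetical stand-ins for the service's own type and callbacks:
 *
 *	for (;;) {
 *		struct my_call *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *
 *		if (!c)
 *			break;
 *		if (rxrpc_kernel_charge_accept(srv_sock, my_notify_rx,
 *					       my_attach_call, (unsigned long)c,
 *					       GFP_KERNEL,
 *					       atomic_inc_return(&rxrpc_debug_id)) < 0) {
 *			kfree(c);
 *			break;
 *		}
 *	}
 */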