/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
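	/* We are about to add one more call, so the peer and conn rings are
	 * topped up below to cover tmp + 1 calls.
	 */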

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
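
/*
 * Illustrative sketch (not part of the original file): the three backlog
 * rings above are standard power-of-two circular buffers, so CIRC_CNT()
 * and the index-advance arithmetic reduce to simple masking.  This assumes
 * RXRPC_BACKLOG_MAX is a power of two, which CIRC_CNT() requires.
 */
#if 0
static unsigned int backlog_ring_demo(void)
{
	unsigned int size = RXRPC_BACKLOG_MAX;	/* power of two */
	unsigned int head = 0, tail = 0;

	/* Producer: fill the slot at head, then advance head with a release
	 * store so the consumer sees the slot contents before the new index.
	 */
	head = (head + 1) & (size - 1);

	/* Occupancy as computed by CIRC_CNT(head, tail, size). */
	return (head - tail) & (size - 1);
}
#endif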

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a call is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

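	/* Keep topping up until the rings are full (-ENOBUFS) or allocation
	 * fails (-ENOMEM); any error stops the loop.
	 */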
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
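	/* (The empty lock/unlock pair acts as a barrier: anyone still inside
	 *  rxrpc_new_incoming_call() holds rx->incoming_lock, so once we have
	 *  taken and released it, no one can still be consuming the rings.)
	 */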

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

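	/* We are called with rx->incoming_lock held (see
	 * rxrpc_new_incoming_call()), making us the sole consumer of the
	 * rings; the acquire loads below pair with the release stores in
	 * rxrpc_service_prealloc_one().
	 */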
	/* Every call needs a conn and every conn needs a peer, so the stocks
	 * must satisfy #peers >= #conns >= #calls, as the assertions below
	 * check.
	 */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		_leave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		_leave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
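
/*
 * Illustrative sketch (not part of the original file): userspace reaches
 * rxrpc_accept_call() above via sendmsg() carrying an RXRPC_ACCEPT control
 * message together with the RXRPC_USER_CALL_ID it wants to assign.  The fd
 * and id values are hypothetical; the cmsg types are from linux/rxrpc.h.
 */
#if 0
	struct msghdr msg;
	struct cmsghdr *cmsg;
	unsigned long id = 0x12345;	/* hypothetical user call ID */
	union {
		char buf[CMSG_SPACE(sizeof(unsigned long)) + CMSG_SPACE(0)];
		struct cmsghdr align;
	} control;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = control.buf;
	msg.msg_controllen = sizeof(control.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(id));
	memcpy(CMSG_DATA(cmsg), &id, sizeof(id));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_ACCEPT;	/* command only, no payload */
	cmsg->cmsg_len = CMSG_LEN(0);

	sendmsg(fd, &msg, 0);	/* fd: the listening AF_RXRPC socket */
#endif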

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}
/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
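
/*
 * Illustrative sketch (not part of the original file): roughly how a kernel
 * service might use rxrpc_kernel_charge_accept() to keep its listening
 * socket charged, in the style of AFS's afs_charge_preallocation().  The
 * my_* names, including my_notify_rx, are hypothetical.
 */
#if 0
struct my_call {
	struct rxrpc_call *rxcall;
};

static void my_attach_call(struct rxrpc_call *call, unsigned long user_call_ID)
{
	/* The service passed the address of its own call record as the tag. */
	struct my_call *c = (struct my_call *)user_call_ID;

	c->rxcall = call;	/* we were given a ref to hold on the call */
}

static void my_charge_backlog(struct socket *sock)
{
	struct my_call *c;

	for (;;) {
		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			break;
		if (rxrpc_kernel_charge_accept(sock, my_notify_rx,
					       my_attach_call,
					       (unsigned long)c, GFP_KERNEL,
					       atomic_inc_return(&rxrpc_debug_id)) < 0) {
			kfree(c);	/* backlog full or out of memory */
			break;
		}
	}
}
#endif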