/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
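	/* The backlogs are power-of-two rings: CIRC_CNT(head, tail, size)
	 * gives the number of occupied slots and the indices wrap by masking
	 * with (size - 1).  E.g. with a ring size of 32, head == 3 and
	 * tail == 30 means five entries are queued.
	 */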
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
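	/* Allow for the call we are about to preallocate when sizing the conn
	 * and peer rings.
	 */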
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}
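
	/* Each smp_store_release() on a ring's head index pairs with the
	 * smp_load_acquire() in rxrpc_alloc_incoming_call(), which consumes
	 * these rings in softirq context: the slot is published before the
	 * index moves.
	 */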

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use.  Compare against
		 * the ID of each existing tree node, not the new call's
		 * as-yet-unset ID.
		 */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;
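	/* The loop ends when rxrpc_service_prealloc_one() finds the rings at
	 * capacity or the accept-backlog quota used up and returns -ENOBUFS,
	 * so one call here tops the backlog up to its limit.
	 */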

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.  Taking and immediately dropping
	 * incoming_lock acts as a barrier: rxrpc_new_incoming_call() holds
	 * that lock while consuming from these rings, so once we acquire it
	 * nothing else can be using them.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
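	/* Seed the call's congestion window from what has already been
	 * learned about this peer.
	 */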
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point, using a separate iterator so as
	 * not to clobber the call we're accepting.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
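
/*
 * Example (sketch only): a kernel service might keep the backlog charged by
 * preallocating one call per expected server operation.  The helper and
 * callback names below are hypothetical, not part of the rxrpc API.  Note
 * that user_attach_call() is invoked immediately, while the call is being
 * preallocated, and the user is left holding a ref on the call.
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		struct my_server_call *c = (struct my_server_call *)user_call_ID;
 *
 *		c->rxcall = rxcall;	// we now hold a ref on rxcall
 *	}
 *
 *	static int my_charge_backlog(struct socket *srv_sock)
 *	{
 *		struct my_server_call *c = my_alloc_server_call();
 *
 *		if (!c)
 *			return -ENOMEM;
 *		return rxrpc_kernel_charge_accept(srv_sock, my_notify_rx,
 *						  my_attach_call,
 *						  (unsigned long)c, GFP_KERNEL,
 *						  atomic_inc_return(&rxrpc_debug_id));
 *	}
 */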