xref: /openbmc/linux/net/rxrpc/call_object.c (revision 6abeae2a)
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

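/* The number of concurrent live calls is capped by a pair of counting
 * semaphores, 1000 slots each.  Note: splitting the limit between
 * kernel-socket users (such as the afs filesystem mentioned below) and
 * userspace sockets presumably stops the two classes of user from starving
 * each other of call slots.
 */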
static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant call with the given user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
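
/* Note: a successful lookup above takes a ref on the call (rxrpc_call_got),
 * so a caller is expected to balance it, along the lines of:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (call) {
 *		// ... operate on the call ...
 *		rxrpc_put_call(call, rxrpc_call_put);
 *	}
 */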

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);
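
	/* Note: the timer doesn't drive the call state machine directly; on
	 * expiry it just queues the processor work item (see
	 * rxrpc_call_timer_expired() above).
	 */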
	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->input_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

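	/* 0xed here, 0xdd in rxrpc_release_call() and 0xcd in
	 * rxrpc_cleanup_call() are presumably debug poison values, so that
	 * use of the node in the wrong lifecycle phase shows up clearly.
	 */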
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

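	/* now + MAX_JIFFY_OFFSET is effectively "never": each event deadline
	 * starts out unset and only becomes real when something shortens it.
	 */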
	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
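	/* down_interruptible() fails if a signal arrives; return NULL in that
	 * case so that the caller can bail out with -ERESTARTSYS.
	 */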
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's user_mutex will need releasing by the
 *   caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter)
		return ERR_PTR(-ERESTARTSYS);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
			 atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
			 atomic_read(&call->usage), here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, -EEXIST);
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, completing the call will queue it for
	 * recvmsg(), so we might now race with recvmsg().  Return 0 from
	 * sys_sendmsg() and leave the error for recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(ret));
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}
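
/* A sketch of the calling convention, going by the lock annotations above
 * (the real call sites live in the sendmsg path and the kernel API):
 *
 *	lock_sock(&rx->sk);
 *	call = rxrpc_new_client_call(rx, &cp, &srx, &p, GFP_KERNEL, debug_id);
 *	// the socket lock has been released either way
 *	if (!IS_ERR(call)) {
 *		// ... use the call ...
 *		mutex_unlock(&call->user_mutex);
 *	}
 */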

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->service_id	= sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->state		= RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp	= skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
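	/* Take a ref only if the call isn't already dead: if the usage count
	 * is zero, atomic_fetch_add_unless() doesn't add and we fail instead.
	 */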
	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call: the usage count is read for tracing, but
 * no ref is taken.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
				 here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}

/*
 * Clean up the RxTx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	int i;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
		call->rxtx_buffer[i] = NULL;
	}
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
			 atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	rxrpc_put_call_slot(call);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* Poison the pointers so that list_empty() returns false in
	 * rxrpc_notify_socket() and the call can't be requeued.
	 */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		rxrpc_disconnect_call(call);
	if (call->security)
		call->security->free_call_crypto(call);

	rxrpc_cleanup_ring(call);
	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

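	/* Calls still waiting to be accepted are simply completed locally;
	 * calls already attached to the socket additionally get an abort
	 * packet sent to the peer.
	 */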
	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = call->debug_id;
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(debug_id, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_put_connection(call->conn);
	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	if (in_softirq()) {
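		/* We can't do the final freeing from softirq context, so
		 * reuse the call's (now idle) processor work item to punt
		 * destruction to a workqueue running in process context.
		 */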
		INIT_WORK(&call->processor, rxrpc_destroy_call);
		if (!rxrpc_queue_work(&call->processor))
			BUG();
	} else {
		rxrpc_destroy_call(&call->processor);
	}
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	rxrpc_cleanup_ring(call);
	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		write_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			write_unlock(&rxnet->call_lock);
			cond_resched();
			write_lock(&rxnet->call_lock);
		}

		write_unlock(&rxnet->call_lock);
	}

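	/* nr_calls is presumably biased by one when the namespace is set up,
	 * so drop that bias here and then wait for any remaining calls to
	 * finish dying.
	 */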
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}