xref: /openbmc/linux/net/rxrpc/call_object.c (revision 9ad685db)
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);

void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
{
	struct rxrpc_local *local = call->local;
	bool busy;

	if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
		spin_lock_bh(&local->lock);
		busy = !list_empty(&call->attend_link);
		trace_rxrpc_poke_call(call, busy, what);
		if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
			busy = true;
		if (!busy)
			list_add_tail(&call->attend_link, &local->call_attend_q);
		spin_unlock_bh(&local->lock);
		if (!busy)
			rxrpc_wake_up_io_thread(local);
	}
}
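
/* Illustrative sketch (not code from this file): the consumer side of the
 * attend queue that rxrpc_poke_call() appends to lives in the I/O thread,
 * which is assumed to pop entries and drop the ref taken above, roughly:
 *
 *	spin_lock_bh(&local->lock);
 *	while ((call = list_first_entry_or_null(&local->call_attend_q,
 *						struct rxrpc_call,
 *						attend_link))) {
 *		list_del_init(&call->attend_link);
 *		spin_unlock_bh(&local->lock);
 *		... handle the call's pending events ...
 *		rxrpc_put_call(call, rxrpc_call_put_poke); // tag illustrative
 *		spin_lock_bh(&local->lock);
 *	}
 *	spin_unlock_bh(&local->lock);
 */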

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (!__rxrpc_call_is_complete(call)) {
		trace_rxrpc_timer_expired(call, jiffies);
		rxrpc_poke_call(call, rxrpc_call_poke_timer);
	}
}

void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
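
/* Note that timer_reduce() only ever brings the expiry forward: if the timer
 * is already pending at an earlier time, the call above is a no-op.  For
 * example, with the timer pending at now + 10 * HZ:
 *
 *	rxrpc_reduce_call_timer(call, now + 2 * HZ, now, why);	// fires at +2s
 *	rxrpc_reduce_call_timer(call, now + 5 * HZ, now, why);	// still +2s
 *
 * This is what lets the several per-call timeouts share a single
 * struct timer_list.
 */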

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

static void rxrpc_destroy_call(struct work_struct *);

/*
 * find an extant call by its user-supplied call ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_get_sendmsg);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}
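
/* The call returned by rxrpc_find_call_by_user_ID() carries a reference
 * (rxrpc_call_get_sendmsg); a sketch of the expected caller pattern:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (call) {
 *		... use the call ...
 *		rxrpc_put_call(call, rxrpc_call_put_sendmsg); // tag illustrative
 *	}
 */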

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->destroyer, rxrpc_destroy_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->attend_link);
	INIT_LIST_HEAD(&call->tx_sendmsg);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
	call->ackr_window = 1;
	call->ackr_wtop = 1;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;

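	/* Set the initial congestion window along the lines of RFC 5681
	 * section 3.1: IW = 2 segments if SMSS > 2190 bytes, 3 segments if
	 * 1095 < SMSS <= 2190, otherwise 4 segments.
	 */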
	if (RXRPC_TX_SMSS > 2190)
		call->cong_cwnd = 2;
	else if (RXRPC_TX_SMSS > 1095)
		call->cong_cwnd = 3;
	else
		call->cong_cwnd = 4;
	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  struct rxrpc_conn_parameters *cp,
						  struct rxrpc_call_params *p,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;
	int ret;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	now = ktime_get_real();
	call->acks_latest_ts	= now;
	call->cong_tstamp	= now;
	call->dest_srx		= *srx;
	call->interruptibility	= p->interruptibility;
	call->tx_total_len	= p->tx_total_len;
	call->key		= key_get(cp->key);
	call->local		= rxrpc_get_local(cp->local, rxrpc_local_get_call);
	call->security_level	= cp->security_level;
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);
	if (cp->exclusive)
		__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);

	/* Clamp the user-supplied timeouts to a minimum of one jiffy; min()
	 * here would cap them at a single jiffy instead of flooring them.
	 */
	if (p->timeouts.normal)
		call->next_rx_timo = max(msecs_to_jiffies(p->timeouts.normal), 1UL);
	if (p->timeouts.idle)
		call->next_req_timo = max(msecs_to_jiffies(p->timeouts.idle), 1UL);
	if (p->timeouts.hard)
		call->hard_timo = p->timeouts.hard * HZ;

	ret = rxrpc_init_client_call_security(call);
	if (ret < 0) {
		rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
		rxrpc_put_call(call, rxrpc_call_put_discard_error);
		return ERR_PTR(ret);
	}

	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 p->user_call_ID, rxrpc_call_new_client);

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->keepalive_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j + call->hard_timo;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}
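
/* The two semaphores defined at the top of this file cap the number of
 * in-flight calls at 1000 each, one pool for kernel-initiated calls and one
 * for userspace calls.  Usage is strictly paired (sketch):
 *
 *	limiter = rxrpc_get_call_slot(p, gfp);	// down() on the semaphore
 *	if (!limiter)
 *		return ERR_PTR(-ERESTARTSYS);	// interrupted while waiting
 *	...
 *	rxrpc_put_call_slot(call);	// up() when the call is released
 */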

/*
 * Start the process of connecting a call.  We obtain a peer and a connection
 * bundle, but the actual association of a call with a connection is offloaded
 * to the I/O thread to simplify locking.
 */
static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_local *local = call->local;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
	if (!call->peer)
		goto error;

	ret = rxrpc_look_up_bundle(call, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
	rxrpc_get_call(call, rxrpc_call_get_io_thread);
	spin_lock(&local->client_call_lock);
	list_add_tail(&call->wait_link, &local->new_client_calls);
	spin_unlock(&local->client_call_lock);
	rxrpc_wake_up_io_thread(local);
	return 0;

error:
	__set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	return ret;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, call->user_mutex is held and must be released by
 *   the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}

	call = rxrpc_alloc_client_call(rx, srx, cp, p, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
			 rxrpc_call_see_userid_exists);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_userid_exists);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() when
	 * completion of the call notifies the socket.  Return 0 from
	 * sys_sendmsg() and leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
			 rxrpc_call_see_connect_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}
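
/* Calling convention sketch for rxrpc_new_client_call() (illustrative, based
 * on the lock annotations above):
 *
 *	lock_sock(&rx->sk);
 *	call = rxrpc_new_client_call(rx, &cp, &srx, &p, GFP_KERNEL, debug_id);
 *	// The socket lock has been released on every path by this point.
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	// On success, call->user_mutex is held and the caller must drop it.
 *	...
 *	mutex_unlock(&call->user_mutex);
 */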

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->dest_srx.srx_service = sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->cong_tstamp	= skb->tstamp;

	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);

	spin_lock(&conn->state_lock);

	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
	case RXRPC_CONN_SERVICE_CHALLENGING:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
		break;
	case RXRPC_CONN_SERVICE:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
		break;

	case RXRPC_CONN_ABORTED:
		rxrpc_set_call_completion(call, conn->completion,
					  conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}

	rxrpc_get_call(call, rxrpc_call_get_io_thread);

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	conn->channels[chan].call = call;
	spin_unlock(&conn->state_lock);

	spin_lock(&conn->peer->lock);
	hlist_add_head(&call->error_link, &conn->peer->error_targets);
	spin_unlock(&conn->peer->lock);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	if (call) {
		int r = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, r, 0, why);
	}
}

struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *call,
				      enum rxrpc_call_trace why)
{
	int r;

	if (!call || !__refcount_inc_not_zero(&call->ref, &r))
		return NULL;
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
	return call;
}
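
/* rxrpc_try_get_call() is the look-aside variant of rxrpc_get_call() below:
 * it refuses to resurrect a call whose refcount has already reached zero,
 * which makes it safe on structures that the freeing path may concurrently
 * be unlinking from - e.g. in rxrpc_poke_call() above:
 *
 *	if (!rxrpc_try_get_call(call, rxrpc_call_get_poke))
 *		busy = true;	// call is already dying; leave it alone
 */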

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	int r;

	__refcount_inc(&call->ref, &r);
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	skb_queue_purge(&call->recvmsg_queue);
	skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	bool put = false, putu = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 call->flags, rxrpc_call_see_release);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);

	/* Make sure we don't get any more notifications */
	spin_lock(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	spin_unlock(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put_unnotify);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		putu = true;
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (putu)
		rxrpc_put_call(call, rxrpc_call_put_userid);

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release_tba);
		rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_get_release_sock);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_release_sock);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	struct rxrpc_net *rxnet = call->rxnet;
	unsigned int debug_id = call->debug_id;
	bool dead;
	int r;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &r);
	trace_rxrpc_call(debug_id, r - 1, 0, why);
	if (dead) {
		ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Free up the call under RCU.
 */
static void rxrpc_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = READ_ONCE(call->rxnet);

	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
	struct rxrpc_txbuf *txb;

	del_timer_sync(&call->timer);

	rxrpc_cleanup_ring(call);
	while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}

	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
	rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
	rxrpc_deactivate_bundle(call->bundle);
	rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
	rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
	rxrpc_put_local(call->local, rxrpc_local_put_call);
	call_rcu(&call->rcu, rxrpc_rcu_free_call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	del_timer(&call->timer);

	if (rcu_read_lock_held())
		/* Can't use the rxrpc workqueue as we need to cancel/flush
		 * something that may be running/waiting there.
		 */
		schedule_work(&call->destroyer);
	else
		rxrpc_destroy_call(&call->destroyer);
}
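
/* Overall teardown sequence (sketch):
 *
 *	rxrpc_put_call()			// final ref dropped
 *	  -> rxrpc_cleanup_call()
 *	    -> rxrpc_destroy_call()		// direct, or via workqueue if
 *						// inside an RCU read section
 *	      -> call_rcu(&call->rcu, rxrpc_rcu_free_call)
 *						// freed after a grace period
 */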

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call, rxrpc_call_see_zap);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[__rxrpc_call_state(call)],
			       call->flags, call->events);

			spin_unlock(&rxnet->call_lock);
			cond_resched();
			spin_lock(&rxnet->call_lock);
		}

		spin_unlock(&rxnet->call_lock);
	}

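	/* Drop the initial count bias on nr_calls (assumed to be set when the
	 * namespace is initialised) and wait for outstanding calls to be
	 * reaped.
	 */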
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
770