/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
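
/*
 * These two defaults drive the timers armed below: the lifetimer set in
 * rxrpc_begin_client_call() and rxrpc_incoming_call(), and the deadspan
 * timer set in rxrpc_release_call().  In trees of this vintage they are
 * typically also exposed as writable sysctls (net/rxrpc/sysctl.c), though
 * that is an assumption about the surrounding tree, not visible here.
 */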

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * Find an extant call by the user's call ID.
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
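
/*
 * Note: the lookup above takes its ref with rxrpc_get_call() while
 * rx->call_lock is still held, so the call can't be freed between lookup
 * and return.  A hypothetical caller therefore owns a ref it must drop:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, id);
 *	if (call) {
 *		...use the call...
 *		rxrpc_put_call(call);
 *	}
 *
 * (Sketch only; the real callers live elsewhere in net/rxrpc.)
 */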

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
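
/*
 * Note: acks_winsz must be a power of two: rxrpc_cleanup_call() advances
 * the tail with "(call->acks_tail + 1) & (call->acks_winsz - 1)", which
 * only wraps correctly for power-of-two sizes.  With the default of 16,
 * for example, a tail of 15 wraps to (15 + 1) & 15 == 0.
 */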

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}
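
/*
 * Note: once rxrpc_connect_call() has assigned a connection and channel,
 * the call is hooked onto the peer's error_targets list so that errors
 * reported against the peer can be propagated to it, and the lifetimer is
 * armed: if the call outlives rxrpc_max_call_lifetime (60 seconds by
 * default, above), rxrpc_call_life_expired() flags it for the call
 * processor to kill.
 */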

/*
 * Set up a new client call for transmission.
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID already present in the tree after
	 * taking the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the same
	 * time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}
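
/*
 * For illustration, a hypothetical kernel-side user of the client API
 * might drive the function above like this (sketch only; the rx socket,
 * parameter blocks and my_user_ID are assumptions, not part of this file):
 *
 *	call = rxrpc_new_client_call(rx, &cp, &srx, my_user_ID, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);	(-EEXIST if my_user_ID is reused)
 *
 * Note the ordering: the call is published in rx->calls and on the global
 * rxrpc_calls list *before* rxrpc_begin_client_call() tries to attach a
 * connection, which is why the error path must unpublish it again.
 */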

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);	/* sic: allocation failure reported as -EBUSY, not -ENOMEM */

	trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
			 0, here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* Fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
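
/*
 * Worked example of the dedup logic above: each of a connection's four
 * channels keeps a monotonically increasing call_counter.  If channel 1
 * last carried call 7, then an incoming callNumber of:
 *
 *	7 or less  -> old_call: rejected with -ECONNRESET (dup/stale);
 *	8 or more  -> accepted, and call_counter becomes that number,
 *	              provided the channel's previous call is COMPLETE.
 *
 * A packet whose callNumber matches the call still bound to the channel
 * is treated as a duplicate for that extant call instead.
 */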

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, 2, n, m, here, 0);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, 3, n, m, here, 0);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, 4, n, m, here, skb);
}
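
/*
 * The second argument to trace_rxrpc_call() in this file is a bare opcode
 * distinguishing the tracepoints; reading off the call sites above and
 * below: 0 = new client call, 1 = new service call, 2 = seen, 3 = got,
 * 4 = got for skb, 5 = put, 6 = put for skb.  (Later kernels replace
 * these magic numbers with a named enum.)
 */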

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
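
/*
 * Note: the socket's ref on the call is not dropped by the function above;
 * it is handed to the deadspan timer, giving the lifecycle
 *
 *	rxrpc_release_call() -> [rxrpc_dead_call_expiry elapses]
 *		-> rxrpc_dead_call_expired(): state = RXRPC_CALL_DEAD, put ref
 *		-> usage reaches 0 -> rxrpc_destroy_call() work item runs
 *
 * so there is a short grace period (2 seconds by default) between release
 * and final destruction.
 */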

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as due for release, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
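
/*
 * Note: a read lock on rx->call_lock suffices above because the walk does
 * not modify the accept queues or the rb-tree itself; the actual unlinking
 * happens later, in rxrpc_release_call(), run from the call processor once
 * the RXRPC_CALL_EV_RELEASE event set here is acted upon.
 */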

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, 5, n, m, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, 6, n, m, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
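
/*
 * Note on the Tx window drain above: acks_window is a circular buffer of
 * sk_buff pointers with the bottom bit used as a flag, hence the "& ~1"
 * mask before the pointer is dereferenced.  CIRC_CNT(head, tail, size)
 * from <linux/circ_buf.h> yields the number of occupied slots, e.g. with
 * head == 5, tail == 2, size == 16 it returns 3.
 */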

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * Preemptively destroy all remaining call records at module unload rather
 * than waiting for them to time out.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* Fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
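
/*
 * Note: the loop above drops and retakes rxrpc_call_lock around
 * cond_resched() so that a long backlog of dying calls doesn't hog the
 * CPU or trigger soft-lockup warnings while the module unloads.
 */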

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}
866