/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till a dead call expires after its last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

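/*
 * Display names for the call states, padded to a fixed width so that they
 * line up in columnar output (e.g. the /proc/net/rxrpc_calls listing).
 */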
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call with the given user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
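
/*
 * Minimal usage sketch (illustrative only, not part of this file): the
 * lookup takes a ref on the call on success, which the caller must
 * balance with rxrpc_put_call():
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (call) {
 *		... operate on the call ...
 *		rxrpc_put_call(call);
 *	}
 */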

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	/* the Tx ACK window size must be a power of two as it's used as a
	 * circular-buffer mask */
	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

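	/* Each timer hands the call back to its handler as an opaque
	 * unsigned long - this is how the pre-timer_setup() timer API
	 * passes its callback argument.
	 */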
	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the tree after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}
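
/*
 * Minimal usage sketch (illustrative only): a process-context caller such
 * as a sendmsg() handler might create a client call and balance its ref
 * like this:
 *
 *	call = rxrpc_new_client_call(rx, &cp, &srx, user_call_ID, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	... transmit the request and await the reply ...
 *	rxrpc_put_call(call);
 */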

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
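 * - a packet for the call currently bound to the channel is treated as a
 *   duplicate; a channel whose previous call has completed is rebound to
 *   the new call; a channel still in use yields -EBUSY; a call ID at or
 *   below the channel's call counter yields -ECONNRESET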
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY); /* allocation failure is reported as busy */

	trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
			 0, here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check that the call number isn't a duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

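/*
 * Note on the tracepoints below: the numeric second argument to
 * trace_rxrpc_call() identifies the operation as used in this file:
 * 0 = new client call, 1 = new service call, 2 = call seen, 3 = ref got,
 * 4 = ref got for an skb, 5 = ref put, 6 = ref put for an skb.
 */
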
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, 2, n, m, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, 3, n, m, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, 4, n, m, here, skb);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
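
/*
 * At this point the socket's ref on the call belongs to the deadspan
 * timer: when it fires, rxrpc_dead_call_expired() moves the call to the
 * DEAD state and drops that ref, and the final rxrpc_put_call() queues
 * the destroyer work item.
 */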

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a ref on a call
 */
void rxrpc_put_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, 5, n, m, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, 6, n, m, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Final call destruction under RCU.
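 * (call_rcu() defers the freeing until RCU-protected lookups that may
 * still be traversing conn->channels[].call have completed.)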
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* the bottom bit of a window slot doubles as a flag,
			 * so mask it off to recover the sk_buff pointer */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all outstanding call records rather than waiting
 * for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}
867