/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

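/*
 * Informative note: in kernels of this generation the limit above is also
 * exposed as a writable sysctl (see net/rxrpc/sysctl.c), so it can
 * presumably be tuned at runtime along the lines of:
 *
 *	sysctl -w net.rxrpc.max_call_lifetime=30
 *
 * (Illustrative only; check net/rxrpc/sysctl.c for the exact name and
 * units at this revision.)
 */
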
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
	[rxrpc_call_new_client]		= "NWc",
	[rxrpc_call_new_service]	= "NWs",
	[rxrpc_call_queued]		= "QUE",
	[rxrpc_call_queued_ref]		= "QUR",
	[rxrpc_call_seen]		= "SEE",
	[rxrpc_call_got]		= "GOT",
	[rxrpc_call_got_skb]		= "Gsk",
	[rxrpc_call_got_userid]		= "Gus",
	[rxrpc_call_put]		= "PUT",
	[rxrpc_call_put_skb]		= "Psk",
	[rxrpc_call_put_userid]		= "Pus",
	[rxrpc_call_put_noqueue]	= "PNQ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
static void rxrpc_cleanup_call(struct rxrpc_call *call);

/*
 * find an extant call by its user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

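/*
 * Illustrative sketch (not part of the original file): a caller of the
 * lookup above must balance the ref it obtains with a put once it is done
 * with the call, e.g.:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (!call)
 *		return -EBADSLT;	// assumed error code, for illustration
 *	...
 *	rxrpc_put_call(call, rxrpc_call_put);
 */
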
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

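/*
 * Note on the window sizing above (informative): acks_winsz must stay a
 * power of two, as the Tx window is a circular buffer whose indices are
 * wrapped with "& (acks_winsz - 1)" masks (see rxrpc_cleanup_call()).  The
 * helpers from <linux/circ_buf.h> then apply directly, e.g.:
 *
 *	used  = CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz);
 *	space = CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz);
 */
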
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;
	rcu_assign_pointer(call->socket, rx);

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

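/*
 * Note on the timer arming above (informative): writing ->expires and then
 * calling add_timer() is only safe because the lifetimer cannot already be
 * pending here; the call has only just been created.  Rearming a timer that
 * may already be pending would instead use:
 *
 *	mod_timer(&call->lifetimer, jiffies + rxrpc_max_call_lifetime);
 */
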
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call, rxrpc_call_put_userid);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

error_out:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	ret = -EEXIST;
	goto error_out;
}

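/*
 * Illustrative sketch (assumed caller, not part of this file): in-kernel
 * users such as kAFS are expected to reach rxrpc_new_client_call() through
 * the AF_RXRPC kernel API, roughly:
 *
 *	call = rxrpc_kernel_begin_call(sock, &srx, key, user_call_ID,
 *				       GFP_NOFS);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 * (Check rxrpc_kernel_begin_call()'s signature in net/rxrpc/af_rxrpc.c at
 * this revision before relying on the above.)
 */
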
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, rxrpc_call_new_service,
			 atomic_read(&candidate->usage), here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->security_ix	= sp->hdr.securityIndex;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;
	rcu_assign_pointer(candidate->socket, rx);

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
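			/* Fall through */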
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call, rxrpc_call_got);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check that the call number isn't a duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

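/*
 * Summary of rxrpc_incoming_call() outcomes (informative):
 *
 *	allocation failure                 -> ERR_PTR(-EBUSY)
 *	duplicate packet for a live call   -> extant call returned (ref taken)
 *	duplicate packet for aborted call  -> ERR_PTR(-ECONNABORTED)
 *	channel still busy with old call   -> ERR_PTR(-EBUSY)
 *	call ID <= channel's call counter  -> ERR_PTR(-ECONNRESET)
 *	otherwise                          -> new call bound to the channel
 */
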
/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

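/*
 * Informative contrast of the two queueing helpers above: rxrpc_queue_call()
 * takes a new ref for the work item (and refuses if the call's refcount has
 * already reached zero), whereas __rxrpc_queue_call() donates a ref the
 * caller must already hold.  A sketch of typical use:
 *
 *	rxrpc_queue_call(call);			// caller keeps its own ref
 *
 *	rxrpc_get_call(call, rxrpc_call_got);
 *	__rxrpc_queue_call(call);		// ref now owned by the work item
 */
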
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, rxrpc_call_got_skb, n, here, skb);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket */
	_debug("RELEASE CALL %p (%d)", call, call->debug_id);

	if (call->peer) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) {
		clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
		rxrpc_call_completed(call);
	} else {
		write_lock_bh(&call->state_lock);

		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("+++ ABORTING STATE %d +++\n", call->state);
			__rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
			clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
		}

		write_unlock_bh(&call->state_lock);
	}

	if (call->conn)
		rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}
	rxrpc_purge_queue(&call->knlrecv_queue);

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_release_call(rx, call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}

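/*
 * Informative: gets and puts are expected to pair up with matching trace
 * ops, which makes ref leaks greppable in the rxrpc_call trace output, e.g.:
 *
 *	rxrpc_get_call(call, rxrpc_call_got);
 *	...
 *	rxrpc_put_call(call, rxrpc_call_put);
 */
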
/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, rxrpc_call_put_skb, n, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERT(!work_pending(&call->processor));
	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

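			/* The bottom bit of each Tx window slot carries a
			 * per-packet flag, so mask it off to recover the
			 * sk_buff pointer.
			 */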
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

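/*
 * Informative: the final free is deferred to rxrpc_rcu_destroy_call() via
 * call_rcu() above because calls are looked up under RCU (note the
 * rcu_assign_pointer()/rcu_dereference_protected() pairs used on
 * conn->channels[].call); deferring the kmem_cache_free() guarantees that
 * such a reader can never touch a recycled object.
 */
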
/*
 * Make sure that all calls are gone.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxrpc_calls))
		return;

	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       atomic_read(&call->ackr_not_idle),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);
		if (!skb_queue_empty(&call->rx_queue))
			pr_err("Rx queue occupied\n");
		if (!skb_queue_empty(&call->rx_oos_queue))
			pr_err("OOS queue occupied\n");

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock: del_timer_sync() waits for a running
 *   timer handler to finish, so a handler blocking on state_lock whilst a
 *   holder of state_lock waits in del_timer_sync() would deadlock
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}
825