/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *	The connection is on the rxrpc_waiting_client_conns list which is kept
 *	in to-be-granted order.  Culled conns with waiters go to the back of
 *	the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *	The connection is on the rxrpc_active_client_conns list which is kept
 *	in activation order for culling purposes.
 *
 *	rxrpc_nr_active_client_conns is incremented to reflect this.
 *
 *  (4) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (5) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *	The connection is on the rxrpc_idle_client_conns list which is kept in
 *	order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call as soon as we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
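
/* A compact summary of the legal cache-state transitions described above (a
 * quick reference only; all of them are made with the cache lock held):
 *
 *	INACTIVE/CULLED/IDLE -> ACTIVE    new call, below max capacity
 *	INACTIVE/CULLED/IDLE -> WAITING   new call, at max capacity
 *	WAITING -> ACTIVE                 capacity freed up
 *	ACTIVE -> CULLED or WAITING       culled (without/with waiting calls)
 *	ACTIVE/WAITING/CULLED -> IDLE     last channel idle, conn EXPOSED
 *	ACTIVE/WAITING/CULLED -> INACTIVE last channel idle, not exposed
 *	IDLE -> INACTIVE                  idle expiry or module unload
 */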

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

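/* Tunables: the maximum number of client conns we'll allow to exist at once,
 * the threshold at which we switch to expedited reaping, and the two idle
 * expiry periods in jiffies (two minutes normally, two seconds when we're
 * over the reap threshold).
 */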
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static unsigned int rxrpc_nr_client_conns;
static unsigned int rxrpc_nr_active_client_conns;
static __read_mostly bool rxrpc_kill_all_client_conns;

static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
static DEFINE_SPINLOCK(rxrpc_client_conn_discard_lock);
static LIST_HEAD(rxrpc_waiting_client_conns);
static LIST_HEAD(rxrpc_active_client_conns);
static LIST_HEAD(rxrpc_idle_client_conns);

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(void);
static void rxrpc_discard_expired_client_conns(struct work_struct *);

static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
			    rxrpc_discard_expired_client_conns);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxrpc_epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
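
/* For illustration only: how the IDR-allocated id, the connection ID and the
 * channel number relate.  Assuming RXRPC_MAXCALLS is 4, RXRPC_CIDSHIFT is 2,
 * so the bottom two bits of a call's CID select one of the connection's four
 * channels and the rest is the connection number allocated above.  A sketch,
 * not code that's used here:
 *
 *	u32 cid  = id << RXRPC_CIDSHIFT;	// becomes conn->proto.cid
 *	u32 wire = cid | channel;		// CID as seen on the wire
 *	u8  chan = wire & RXRPC_CHANNELMASK;	// recover the channel
 *	int idr  = wire >> RXRPC_CIDSHIFT;	// recover the IDR index
 */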

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxrpc_epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
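
/* A worked example of the heuristic above, using the default
 * rxrpc_max_client_connections of 1000: limit = max(1000 * 4, 1024) = 4000,
 * so with the allocation cursor at, say, 50000, a connection whose ID is 10
 * (distance 49990) gets marked DONT_REUSE, whereas one with ID 47500
 * (distance 2500) may still be reused.
 */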

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection so that other threads can find it.  We
	 * need to redo the search before doing this lest we race with someone
	 * else adding a conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection can't be reused (e.g. it's from
			 * an outdated epoch), so replace it.
			 */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_connection *conn)
{
	trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
	conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	rxrpc_nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
		goto out;

	spin_lock(&rxrpc_client_conn_cache_lock);

	nr_conns = rxrpc_nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxrpc_nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxrpc_client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
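
/* The loop above works on a small occupancy bitmap.  For example, assuming
 * four channels per conn (RXRPC_MAXCALLS), with active_chans == 0x5
 * (channels 0 and 2 busy):
 *
 *	avail = ~0x5 & RXRPC_ACTIVE_CHANS_MASK = 0xa
 *	__ffs(0xa) = 1
 *
 * so channel 1 is granted first, then channel 3, i.e. waiting calls always
 * receive the lowest-numbered free channel.
 */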

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}
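
/* In outline, the barrier pairing between rxrpc_activate_one_channel() and
 * the function above looks like this (a sketch of the intent only, not of
 * every field involved):
 *
 *	activator				waiter
 *	=========				======
 *	set up call->cid etc.
 *	smp_wmb();
 *	chan->call_id = call_id;
 *	wake_up(&call->waitq);			sees call->call_id set
 *						smp_rmb();
 *						reads the fields set up above
 */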

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(NULL);
	rxrpc_cull_active_client_conns();

	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	rxrpc_animate_client_conn(call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0)
		rxrpc_disconnect_client_call(call);

	_leave(" = %d", ret);
	return ret;
}
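
/* A minimal sketch of how this is driven (hypothetical caller; the real
 * driver is the client-call setup path, which also handles call refcounting
 * and socket locking):
 *
 *	call = ...;	// a freshly allocated rxrpc_call
 *	ret = rxrpc_connect_call(call, cp, srx, GFP_KERNEL);
 *	if (ret < 0)
 *		return ret;	// no channel was granted
 *	// call->cid and call->call_id are now set and DATA can be sent
 */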

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}
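
/* A note on the overflow check above: chan->call_counter only ever counts
 * upwards, so marking the connection DONT_REUSE once the counter reaches
 * INT_MAX retires it (after outstanding calls complete) long before the
 * 32-bit callNumber could wrap back around to small, previously-used values.
 */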

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxrpc_client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxrpc_client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxrpc_nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxrpc_client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
		if (rxrpc_idle_client_conns.next == &conn->cache_link &&
		    !rxrpc_kill_all_client_conns)
			queue_delayed_work(rxrpc_workqueue,
					   &rxrpc_client_conn_reap,
					   rxrpc_conn_idle_client_expiry);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxrpc_client_conn_cache_lock);
		nr_conns = --rxrpc_nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxrpc_waiting_client_conns)) {
			next = list_entry(rxrpc_waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(next);
		}

		spin_unlock(&rxrpc_client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took on next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}
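
/* The loop above is the iterative form of what would naturally be recursive:
 * dropping the temporary ref on the next conn could itself kill that conn
 * and activate yet another one.  A recursive sketch of the same logic, shown
 * only to illustrate why the loop is used instead (unbounded recursion could
 * eat the stack):
 *
 *	void put(struct rxrpc_connection *conn)
 *	{
 *		if (atomic_dec_return(&conn->usage) <= 0) {
 *			conn = rxrpc_put_one_client_conn(conn);
 *			if (conn)
 *				put(conn);	// recursion, avoided above
 *		}
 *	}
 */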

/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(void)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxrpc_nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxrpc_client_conn_cache_lock);
	nr_active = rxrpc_nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxrpc_active_client_conns));
		conn = list_entry(rxrpc_active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxrpc_waiting_client_conns);
		}

		nr_active--;
	}

	rxrpc_nr_active_client_conns = nr_active;
	spin_unlock(&rxrpc_client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}
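
/* With the default tunables, the cull behaves like this: nothing happens
 * until 1000 client conns exist; from that point, the longest-activated
 * conns are culled (or sent back to the wait queue if they have waiting
 * calls) until only 900 remain active, leaving headroom for new connections.
 */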

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
static void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;
	bool did_discard = false;

	_enter("%c", work ? 'w' : 'n');

	if (list_empty(&rxrpc_idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxrpc_client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxrpc_nr_client_conns;

next:
	spin_lock(&rxrpc_client_conn_cache_lock);

	if (list_empty(&rxrpc_idle_client_conns))
		goto out;

	conn = list_entry(rxrpc_idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxrpc_kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxrpc_client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that the flag held on the usage count.  We deal with that
	 * here.  If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	did_discard = true;
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxrpc_kill_all_client_conns)
		queue_delayed_work(rxrpc_workqueue,
				   &rxrpc_client_conn_reap,
				   conn_expires_at - now);

out:
	spin_unlock(&rxrpc_client_conn_cache_lock);
	spin_unlock(&rxrpc_client_conn_discard_lock);
	_leave("");
}
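
/* An expiry timing example, using the default tunables: a conn that went
 * idle at jiffies time T is discarded at T + 2 * 60 * HZ (two minutes), or
 * at T + 2 * HZ (two seconds) if more than 900 client conns exist.  The
 * reaper requeues itself for the front conn's expiry time, so a conn is
 * discarded shortly after its deadline rather than exactly on it.
 */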

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void __exit rxrpc_destroy_all_client_connections(void)
{
	_enter("");

	spin_lock(&rxrpc_client_conn_cache_lock);
	rxrpc_kill_all_client_conns = true;
	spin_unlock(&rxrpc_client_conn_cache_lock);

	cancel_delayed_work(&rxrpc_client_conn_reap);

	if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
		_debug("destroy: queue failed");

	_leave("");
}