xref: /openbmc/linux/net/rxrpc/conn_client.c (revision b8d312aa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Client connection-specific management code.
3  *
4  * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  *
7  * Client connections need to be cached for a little while after they've made a
8  * call so as to handle retransmitted DATA packets in case the server didn't
9  * receive the final ACK or terminating ABORT we sent it.
10  *
11  * Client connections can be in one of a number of cache states (sketched in code just after this comment):
12  *
13  *  (1) INACTIVE - The connection is not held in any list and may not have been
14  *      exposed to the world.  If it has been previously exposed, it was
15  *      discarded from the idle list after expiring.
16  *
17  *  (2) WAITING - The connection is waiting for the number of client conns to
18  *      drop below the maximum capacity.  Calls may be in progress upon it from
19  *      when it was active and got culled.
20  *
21  *	The connection is on the rxnet->waiting_client_conns list which is kept
22  *	in to-be-granted order.  Culled conns with waiters go to the back of
23  *	the queue just like new conns.
24  *
25  *  (3) ACTIVE - The connection has at least one call in progress upon it, it
26  *      may freely grant available channels to new calls and calls may be
27  *      waiting on it for channels to become available.
28  *
29  *	The connection is on the rxnet->active_client_conns list which is kept
30  *	in activation order for culling purposes.
31  *
32  *	rxnet->nr_active_client_conns is also kept incremented.
33  *
34  *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
35  *      being used to probe for service upgrade.
36  *
37  *  (5) CULLED - The connection got summarily culled to try to free up
38  *      capacity.  Calls currently in progress on the connection are allowed to
39  *      continue, but new calls will have to wait.  There can be no waiters in
40  *      this state - the conn would have to go to the WAITING state instead.
41  *
42  *  (6) IDLE - The connection has no calls in progress upon it and must have
43  *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
44  *      expires, the EXPOSED flag is cleared and the connection transitions to
45  *      the INACTIVE state.
46  *
47  *	The connection is on the rxnet->idle_client_conns list which is kept in
48  *	order of how soon they'll expire.
49  *
50  * There are flags of relevance to the cache:
51  *
52  *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
53  *      set, an extra ref is added to the connection preventing it from being
54  *      reaped when it has no calls outstanding.  This flag is cleared and the
55  *      ref dropped when a conn is discarded from the idle list.
56  *
57  *      This allows us to move terminal call state retransmission to the
58  *      connection and to discard the call as soon as we think it is done
59  *      with.  It also gives us a chance to reuse the connection.
60  *
61  *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
62  *      should not be reused.  This is set when an exclusive connection is used
63  *      or a call ID counter overflows.
64  *
65  * The caching state may only be changed if the cache lock is held.
66  *
67  * There are two idle client connection expiry durations.  If the total number
68  * of connections is below the reap threshold, we use the normal duration; if
69  * it's above, we use the fast duration.
70  */
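
/* For illustration only: the six cache states above correspond to the
 * RXRPC_CONN_CLIENT_* values used throughout this file.  A minimal,
 * non-compiled mirror of that state set (the authoritative definition is
 * assumed to live in ar-internal.h):
 */
#if 0
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* (1) not on any list */
	RXRPC_CONN_CLIENT_WAITING,	/* (2) on rxnet->waiting_client_conns */
	RXRPC_CONN_CLIENT_ACTIVE,	/* (3) on rxnet->active_client_conns */
	RXRPC_CONN_CLIENT_UPGRADE,	/* (4) as ACTIVE, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED,	/* (5) summarily deactivated */
	RXRPC_CONN_CLIENT_IDLE,		/* (6) on rxnet->idle_client_conns */
};
#endif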
71 
72 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
73 
74 #include <linux/slab.h>
75 #include <linux/idr.h>
76 #include <linux/timer.h>
77 #include <linux/sched/signal.h>
78 
79 #include "ar-internal.h"
80 
81 __read_mostly unsigned int rxrpc_max_client_connections = 1000;
82 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
83 __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
84 __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
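
/* With the defaults above: an idle conn is normally reaped two minutes after
 * its last call completes, but once the number of client conns exceeds the
 * reap threshold of 900 (against a cap of 1000), the reaper switches to the
 * two-second fast expiry to shed capacity more quickly.
 */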
85 
86 /*
87  * We use machine-unique IDs for our client connections.
88  */
89 DEFINE_IDR(rxrpc_client_conn_ids);
90 static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
91 
92 static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
93 
94 /*
95  * Get a connection ID and epoch for a client connection from the global pool.
96  * The connection struct pointer is then recorded in the idr radix tree.  The
97  * epoch doesn't change until the client is rebooted (or, at least, until the
98  * module is unloaded).
99  */
100 static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
101 					  gfp_t gfp)
102 {
103 	struct rxrpc_net *rxnet = conn->params.local->rxnet;
104 	int id;
105 
106 	_enter("");
107 
108 	idr_preload(gfp);
109 	spin_lock(&rxrpc_conn_id_lock);
110 
111 	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
112 			      1, 0x40000000, GFP_NOWAIT);
113 	if (id < 0)
114 		goto error;
115 
116 	spin_unlock(&rxrpc_conn_id_lock);
117 	idr_preload_end();
118 
119 	conn->proto.epoch = rxnet->epoch;
120 	conn->proto.cid = id << RXRPC_CIDSHIFT;
121 	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
122 	_leave(" [CID %x]", conn->proto.cid);
123 	return 0;
124 
125 error:
126 	spin_unlock(&rxrpc_conn_id_lock);
127 	idr_preload_end();
128 	_leave(" = %d", id);
129 	return id;
130 }
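
/* For illustration: how a client CID decomposes.  The IDR-allocated ID
 * occupies the bits above RXRPC_CIDSHIFT and the bottom bits select the
 * channel, matching rxrpc_put_client_connection_id() and the channel lookups
 * below.  A non-compiled sketch, assuming RXRPC_MAXCALLS channels per conn:
 */
#if 0
static void rxrpc_example_decode_cid(u32 cid)
{
	u32 conn_id = cid >> RXRPC_CIDSHIFT;	/* the ID allocated above */
	u32 channel = cid & RXRPC_CHANNELMASK;	/* 0 .. RXRPC_MAXCALLS - 1 */

	pr_info("cid %08x -> conn %x chan %u\n", cid, conn_id, channel);
}
#endif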
131 
132 /*
133  * Release a connection ID for a client connection from the global pool.
134  */
135 static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
136 {
137 	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
138 		spin_lock(&rxrpc_conn_id_lock);
139 		idr_remove(&rxrpc_client_conn_ids,
140 			   conn->proto.cid >> RXRPC_CIDSHIFT);
141 		spin_unlock(&rxrpc_conn_id_lock);
142 	}
143 }
144 
145 /*
146  * Destroy the client connection ID tree.
147  */
148 void rxrpc_destroy_client_conn_ids(void)
149 {
150 	struct rxrpc_connection *conn;
151 	int id;
152 
153 	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
154 		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
155 			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
156 			       conn, atomic_read(&conn->usage));
157 		}
158 		BUG();
159 	}
160 
161 	idr_destroy(&rxrpc_client_conn_ids);
162 }
163 
164 /*
165  * Allocate a client connection.
166  */
167 static struct rxrpc_connection *
168 rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
169 {
170 	struct rxrpc_connection *conn;
171 	struct rxrpc_net *rxnet = cp->local->rxnet;
172 	int ret;
173 
174 	_enter("");
175 
176 	conn = rxrpc_alloc_connection(gfp);
177 	if (!conn) {
178 		_leave(" = -ENOMEM");
179 		return ERR_PTR(-ENOMEM);
180 	}
181 
182 	atomic_set(&conn->usage, 1);
183 	if (cp->exclusive)
184 		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
185 	if (cp->upgrade)
186 		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
187 
188 	conn->params		= *cp;
189 	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
190 	conn->state		= RXRPC_CONN_CLIENT;
191 	conn->service_id	= cp->service_id;
192 
193 	ret = rxrpc_get_client_connection_id(conn, gfp);
194 	if (ret < 0)
195 		goto error_0;
196 
197 	ret = rxrpc_init_client_conn_security(conn);
198 	if (ret < 0)
199 		goto error_1;
200 
201 	ret = conn->security->prime_packet_security(conn);
202 	if (ret < 0)
203 		goto error_2;
204 
205 	atomic_inc(&rxnet->nr_conns);
206 	write_lock(&rxnet->conn_lock);
207 	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
208 	write_unlock(&rxnet->conn_lock);
209 
210 	/* We steal the caller's peer ref. */
211 	cp->peer = NULL;
212 	rxrpc_get_local(conn->params.local);
213 	key_get(conn->params.key);
214 
215 	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
216 			 __builtin_return_address(0));
217 	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
218 	_leave(" = %p", conn);
219 	return conn;
220 
221 error_2:
222 	conn->security->clear(conn);
223 error_1:
224 	rxrpc_put_client_connection_id(conn);
225 error_0:
226 	kfree(conn);
227 	_leave(" = %d", ret);
228 	return ERR_PTR(ret);
229 }
230 
231 /*
232  * Determine if a connection may be reused.
233  */
234 static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
235 {
236 	struct rxrpc_net *rxnet = conn->params.local->rxnet;
237 	int id_cursor, id, distance, limit;
238 
239 	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
240 		goto dont_reuse;
241 
242 	if (conn->proto.epoch != rxnet->epoch)
243 		goto mark_dont_reuse;
244 
245	/* The IDR tree gets very expensive on memory if the connection IDs are
246	 * widely scattered throughout the number space, so we want to kill off
247	 * connections that have an ID more than about four times the maximum
248	 * number of client conns away from the current allocation point, to
249	 * try to keep the IDs concentrated.
250	 */
251 	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
252 	id = conn->proto.cid >> RXRPC_CIDSHIFT;
253 	distance = id - id_cursor;
254 	if (distance < 0)
255 		distance = -distance;
256 	limit = max(rxrpc_max_client_connections * 4, 1024U);
257 	if (distance > limit)
258 		goto mark_dont_reuse;
259 
260 	return true;
261 
262 mark_dont_reuse:
263 	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
264 dont_reuse:
265 	return false;
266 }
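
/* For example, with the default rxrpc_max_client_connections of 1000, the
 * limit above is max(4000, 1024) = 4000: a conn whose ID lies 8000 slots
 * from the allocation cursor gets marked DONT_REUSE and will be replaced
 * rather than reused.
 */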
267 
268 /*
269  * Create or find a client connection to use for a call.
270  *
271  * If we return with a connection, the call will be on its waiting list.  It's
272  * left to the caller to assign a channel and wake up the call.
273  */
274 static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
275 				 struct rxrpc_call *call,
276 				 struct rxrpc_conn_parameters *cp,
277 				 struct sockaddr_rxrpc *srx,
278 				 gfp_t gfp)
279 {
280 	struct rxrpc_connection *conn, *candidate = NULL;
281 	struct rxrpc_local *local = cp->local;
282 	struct rb_node *p, **pp, *parent;
283 	long diff;
284 	int ret = -ENOMEM;
285 
286 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
287 
288 	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
289 	if (!cp->peer)
290 		goto error;
291 
292 	call->cong_cwnd = cp->peer->cong_cwnd;
293 	if (call->cong_cwnd >= call->cong_ssthresh)
294 		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
295 	else
296 		call->cong_mode = RXRPC_CALL_SLOW_START;
297 
298 	/* If the connection is not meant to be exclusive, search the available
299 	 * connections to see if the connection we want to use already exists.
300 	 */
301 	if (!cp->exclusive) {
302 		_debug("search 1");
303 		spin_lock(&local->client_conns_lock);
304 		p = local->client_conns.rb_node;
305 		while (p) {
306 			conn = rb_entry(p, struct rxrpc_connection, client_node);
307 
308 #define cmp(X) ((long)conn->params.X - (long)cp->X)
309 			diff = (cmp(peer) ?:
310 				cmp(key) ?:
311 				cmp(security_level) ?:
312 				cmp(upgrade));
313 #undef cmp
314 			if (diff < 0) {
315 				p = p->rb_left;
316 			} else if (diff > 0) {
317 				p = p->rb_right;
318 			} else {
319 				if (rxrpc_may_reuse_conn(conn) &&
320 				    rxrpc_get_connection_maybe(conn))
321 					goto found_extant_conn;
322 				/* The connection needs replacing.  It's better
323 				 * to effect that when we have something to
324 				 * replace it with so that we don't have to
325 				 * rebalance the tree twice.
326 				 */
327 				break;
328 			}
329 		}
330 		spin_unlock(&local->client_conns_lock);
331 	}
332 
333 	/* There wasn't a connection yet or we need an exclusive connection.
334 	 * We need to create a candidate and then potentially redo the search
335 	 * in case we're racing with another thread also trying to connect on a
336 	 * shareable connection.
337 	 */
338 	_debug("new conn");
339 	candidate = rxrpc_alloc_client_connection(cp, gfp);
340 	if (IS_ERR(candidate)) {
341 		ret = PTR_ERR(candidate);
342 		goto error_peer;
343 	}
344 
345 	/* Add the call to the new connection's waiting list in case we're
346 	 * going to have to wait for the connection to come live.  It's our
347 	 * connection, so we want first dibs on the channel slots.  We would
348 	 * normally have to take channel_lock but we do this before anyone else
349 	 * can see the connection.
350 	 */
351 	list_add(&call->chan_wait_link, &candidate->waiting_calls);
352 
353 	if (cp->exclusive) {
354 		call->conn = candidate;
355 		call->security_ix = candidate->security_ix;
356 		call->service_id = candidate->service_id;
357 		_leave(" = 0 [exclusive %d]", candidate->debug_id);
358 		return 0;
359 	}
360 
361	/* Publish the new connection so that other threads can find it.  We
362	 * need to redo the search before doing this lest we race with someone
363	 * else adding a conflicting instance.
364 	 */
365 	_debug("search 2");
366 	spin_lock(&local->client_conns_lock);
367 
368 	pp = &local->client_conns.rb_node;
369 	parent = NULL;
370 	while (*pp) {
371 		parent = *pp;
372 		conn = rb_entry(parent, struct rxrpc_connection, client_node);
373 
374 #define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
375 		diff = (cmp(peer) ?:
376 			cmp(key) ?:
377 			cmp(security_level) ?:
378 			cmp(upgrade));
379 #undef cmp
380 		if (diff < 0) {
381 			pp = &(*pp)->rb_left;
382 		} else if (diff > 0) {
383 			pp = &(*pp)->rb_right;
384 		} else {
385 			if (rxrpc_may_reuse_conn(conn) &&
386 			    rxrpc_get_connection_maybe(conn))
387 				goto found_extant_conn;
388			/* The old conn can't be reused (e.g. outdated epoch). */
389 			_debug("replace conn");
390 			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
391 			rb_replace_node(&conn->client_node,
392 					&candidate->client_node,
393 					&local->client_conns);
394 			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
395 			goto candidate_published;
396 		}
397 	}
398 
399 	_debug("new conn");
400 	rb_link_node(&candidate->client_node, parent, pp);
401 	rb_insert_color(&candidate->client_node, &local->client_conns);
402 
403 candidate_published:
404 	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
405 	call->conn = candidate;
406 	call->security_ix = candidate->security_ix;
407 	call->service_id = candidate->service_id;
408 	spin_unlock(&local->client_conns_lock);
409 	_leave(" = 0 [new %d]", candidate->debug_id);
410 	return 0;
411 
412 	/* We come here if we found a suitable connection already in existence.
413 	 * Discard any candidate we may have allocated, and try to get a
414 	 * channel on this one.
415 	 */
416 found_extant_conn:
417 	_debug("found conn");
418 	spin_unlock(&local->client_conns_lock);
419 
420 	if (candidate) {
421 		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
422 		rxrpc_put_connection(candidate);
423 		candidate = NULL;
424 	}
425 
426 	spin_lock(&conn->channel_lock);
427 	call->conn = conn;
428 	call->security_ix = conn->security_ix;
429 	call->service_id = conn->service_id;
430 	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
431 	spin_unlock(&conn->channel_lock);
432 	_leave(" = 0 [extant %d]", conn->debug_id);
433 	return 0;
434 
435 error_peer:
436 	rxrpc_put_peer(cp->peer);
437 	cp->peer = NULL;
438 error:
439 	_leave(" = %d", ret);
440 	return ret;
441 }
442 
443 /*
444  * Activate a connection.
445  */
446 static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
447 				struct rxrpc_connection *conn)
448 {
449 	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
450 		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
451 		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
452 	} else {
453 		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
454 		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
455 	}
456 	rxnet->nr_active_client_conns++;
457 	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
458 }
459 
460 /*
461  * Attempt to animate a connection for a new call.
462  *
463  * If it's not exclusive, the connection is in the endpoint tree, and we're in
464  * the conn's list of those waiting to grab a channel.  There is, however, a
465  * limit on the number of live connections allowed at any one time, so we may
466  * have to wait for capacity to become available.
467  *
468  * Note that a connection on the waiting queue might *also* have active
469  * channels if it has been culled to make space and then re-requested by a new
470  * call.
471  */
472 static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
473 				      struct rxrpc_connection *conn)
474 {
475 	unsigned int nr_conns;
476 
477 	_enter("%d,%d", conn->debug_id, conn->cache_state);
478 
479 	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
480 	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
481 		goto out;
482 
483 	spin_lock(&rxnet->client_conn_cache_lock);
484 
485 	nr_conns = rxnet->nr_client_conns;
486 	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
487 		trace_rxrpc_client(conn, -1, rxrpc_client_count);
488 		rxnet->nr_client_conns = nr_conns + 1;
489 	}
490 
491 	switch (conn->cache_state) {
492 	case RXRPC_CONN_CLIENT_ACTIVE:
493 	case RXRPC_CONN_CLIENT_UPGRADE:
494 	case RXRPC_CONN_CLIENT_WAITING:
495 		break;
496 
497 	case RXRPC_CONN_CLIENT_INACTIVE:
498 	case RXRPC_CONN_CLIENT_CULLED:
499 	case RXRPC_CONN_CLIENT_IDLE:
500 		if (nr_conns >= rxrpc_max_client_connections)
501 			goto wait_for_capacity;
502 		goto activate_conn;
503 
504 	default:
505 		BUG();
506 	}
507 
508 out_unlock:
509 	spin_unlock(&rxnet->client_conn_cache_lock);
510 out:
511 	_leave(" [%d]", conn->cache_state);
512 	return;
513 
514 activate_conn:
515 	_debug("activate");
516 	rxrpc_activate_conn(rxnet, conn);
517 	goto out_unlock;
518 
519 wait_for_capacity:
520 	_debug("wait");
521 	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
522 	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
523 	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
524 	goto out_unlock;
525 }
526 
527 /*
528  * Deactivate a channel.
529  */
530 static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
531 					 unsigned int channel)
532 {
533 	struct rxrpc_channel *chan = &conn->channels[channel];
534 
535 	rcu_assign_pointer(chan->call, NULL);
536 	conn->active_chans &= ~(1 << channel);
537 }
538 
539 /*
540  * Assign a channel to the call at the front of the queue and wake the call up.
541  * We don't increment the callNumber counter until this number has been exposed
542  * to the world.
543  */
544 static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
545 				       unsigned int channel)
546 {
547 	struct rxrpc_channel *chan = &conn->channels[channel];
548 	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
549 					     struct rxrpc_call, chan_wait_link);
550 	u32 call_id = chan->call_counter + 1;
551 
552 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
553 
554 	/* Cancel the final ACK on the previous call if it hasn't been sent yet
555 	 * as the DATA packet will implicitly ACK it.
556 	 */
557 	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
558 
559 	write_lock_bh(&call->state_lock);
560 	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
561 	write_unlock_bh(&call->state_lock);
562 
563 	rxrpc_see_call(call);
564 	list_del_init(&call->chan_wait_link);
565 	conn->active_chans |= 1 << channel;
566 	call->peer	= rxrpc_get_peer(conn->params.peer);
567 	call->cid	= conn->proto.cid | channel;
568 	call->call_id	= call_id;
569 
570 	trace_rxrpc_connect_call(call);
571 	_net("CONNECT call %08x:%08x as call %d on conn %d",
572 	     call->cid, call->call_id, call->debug_id, conn->debug_id);
573 
574 	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
575	 * orders cid and epoch in the connection with respect to call_id
576	 * without the need to take the channel_lock.
577 	 *
578 	 * We provisionally assign a callNumber at this point, but we don't
579 	 * confirm it until the call is about to be exposed.
580 	 *
581 	 * TODO: Pair with a barrier in the data_ready handler when that looks
582 	 * at the call ID through a connection channel.
583 	 */
584 	smp_wmb();
585 	chan->call_id	= call_id;
586 	chan->call_debug_id = call->debug_id;
587 	rcu_assign_pointer(chan->call, call);
588 	wake_up(&call->waitq);
589 }
590 
591 /*
592  * Assign channels and callNumbers to waiting calls with channel_lock
593  * held by caller.
594  */
595 static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
596 {
597 	u8 avail, mask;
598 
599 	switch (conn->cache_state) {
600 	case RXRPC_CONN_CLIENT_ACTIVE:
601 		mask = RXRPC_ACTIVE_CHANS_MASK;
602 		break;
603 	case RXRPC_CONN_CLIENT_UPGRADE:
604 		mask = 0x01;
605 		break;
606 	default:
607 		return;
608 	}
609 
610 	while (!list_empty(&conn->waiting_calls) &&
611 	       (avail = ~conn->active_chans,
612 		avail &= mask,
613 		avail != 0))
614 		rxrpc_activate_one_channel(conn, __ffs(avail));
615 }
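
/* For example, assuming four channels per conn: with active_chans == 0x5
 * (channels 0 and 2 busy) and mask == RXRPC_ACTIVE_CHANS_MASK (0xf), avail
 * == 0xa, so __ffs(avail) grants channel 1 first and then, if calls are
 * still waiting, channel 3.  In the UPGRADE state the 0x01 mask confines
 * this to channel 0.
 */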
616 
617 /*
618  * Assign channels and callNumbers to waiting calls.
619  */
620 static void rxrpc_activate_channels(struct rxrpc_connection *conn)
621 {
622 	_enter("%d", conn->debug_id);
623 
624 	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);
625 
626 	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
627 		return;
628 
629 	spin_lock(&conn->channel_lock);
630 	rxrpc_activate_channels_locked(conn);
631 	spin_unlock(&conn->channel_lock);
632 	_leave("");
633 }
634 
635 /*
636  * Wait for a callNumber and a channel to be granted to a call.
637  */
638 static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
639 {
640 	int ret = 0;
641 
642 	_enter("%d", call->debug_id);
643 
644 	if (!call->call_id) {
645 		DECLARE_WAITQUEUE(myself, current);
646 
647 		if (!gfpflags_allow_blocking(gfp)) {
648 			ret = -EAGAIN;
649 			goto out;
650 		}
651 
652 		add_wait_queue_exclusive(&call->waitq, &myself);
653 		for (;;) {
654 			if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
655 				set_current_state(TASK_INTERRUPTIBLE);
656 			else
657 				set_current_state(TASK_UNINTERRUPTIBLE);
658 			if (call->call_id)
659 				break;
660 			if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
661 			    signal_pending(current)) {
662 				ret = -ERESTARTSYS;
663 				break;
664 			}
665 			schedule();
666 		}
667 		remove_wait_queue(&call->waitq, &myself);
668 		__set_current_state(TASK_RUNNING);
669 	}
670 
671 	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
672 	smp_rmb();
673 
674 out:
675 	_leave(" = %d", ret);
676 	return ret;
677 }
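
/* Note the gfp check above: a caller whose allocation mask doesn't permit
 * blocking gets -EAGAIN straight back rather than sleeping; only callers
 * that may block are put on the exclusive waitqueue.
 */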
678 
679 /*
680  * Find a connection for a call.
681  * - called in process context with IRQs enabled
682  */
683 int rxrpc_connect_call(struct rxrpc_sock *rx,
684 		       struct rxrpc_call *call,
685 		       struct rxrpc_conn_parameters *cp,
686 		       struct sockaddr_rxrpc *srx,
687 		       gfp_t gfp)
688 {
689 	struct rxrpc_net *rxnet = cp->local->rxnet;
690 	int ret;
691 
692 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
693 
694 	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
695 	rxrpc_cull_active_client_conns(rxnet);
696 
697 	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
698 	if (ret < 0)
699 		goto out;
700 
701 	rxrpc_animate_client_conn(rxnet, call->conn);
702 	rxrpc_activate_channels(call->conn);
703 
704 	ret = rxrpc_wait_for_channel(call, gfp);
705 	if (ret < 0) {
706 		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
707 		rxrpc_disconnect_client_call(call);
708 		goto out;
709 	}
710 
711 	spin_lock_bh(&call->conn->params.peer->lock);
712 	hlist_add_head_rcu(&call->error_link,
713 			   &call->conn->params.peer->error_targets);
714 	spin_unlock_bh(&call->conn->params.peer->lock);
715 
716 out:
717 	_leave(" = %d", ret);
718 	return ret;
719 }
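
/* A sketch of the expected calling pattern (hypothetical caller, loosely
 * modelled on the call-setup path; the error label is an assumption):
 */
#if 0
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error;	/* no channel granted; call->conn left NULL */
	/* The call now has a channel, CID and callNumber and may transmit. */
#endif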
720 
721 /*
722  * Note that a connection is about to be exposed to the world.  Once it is
723  * exposed, we maintain an extra ref on it that stops it from being summarily
724  * discarded before it's (a) had a chance to deal with retransmission and (b)
725  * had a chance at re-use (the per-connection security negotiation is
726  * expensive).
727  */
728 static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
729 				     unsigned int channel)
730 {
731 	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
732 		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
733 		rxrpc_get_connection(conn);
734 	}
735 }
736 
737 /*
738  * Note that a call, and thus a connection, is about to be exposed to the
739  * world.
740  */
741 void rxrpc_expose_client_call(struct rxrpc_call *call)
742 {
743 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
744 	struct rxrpc_connection *conn = call->conn;
745 	struct rxrpc_channel *chan = &conn->channels[channel];
746 
747 	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
748 		/* Mark the call ID as being used.  If the callNumber counter
749 		 * exceeds ~2 billion, we kill the connection after its
750 		 * outstanding calls have finished so that the counter doesn't
751 		 * wrap.
752 		 */
753 		chan->call_counter++;
754 		if (chan->call_counter >= INT_MAX)
755 			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
756 		rxrpc_expose_client_conn(conn, channel);
757 	}
758 }
759 
760 /*
761  * Set the reap timer.
762  */
763 static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
764 {
765 	unsigned long now = jiffies;
766 	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
767 
768 	if (rxnet->live)
769 		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
770 }
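
/* Note that timer_reduce() only ever brings the expiry forward, so racing
 * callers can make the reap happen sooner but never postpone it.
 */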
771 
772 /*
773  * Disconnect a client call.
774  */
775 void rxrpc_disconnect_client_call(struct rxrpc_call *call)
776 {
777 	struct rxrpc_connection *conn = call->conn;
778 	struct rxrpc_channel *chan = NULL;
779 	struct rxrpc_net *rxnet = conn->params.local->rxnet;
780 	unsigned int channel = -1;
781 	u32 cid;
782 
783 	spin_lock(&conn->channel_lock);
784 
785 	cid = call->cid;
786 	if (cid) {
787 		channel = cid & RXRPC_CHANNELMASK;
788 		chan = &conn->channels[channel];
789 	}
790 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
791 	call->conn = NULL;
792 
793 	/* Calls that have never actually been assigned a channel can simply be
794 	 * discarded.  If the conn didn't get used either, it will follow
795 	 * immediately unless someone else grabs it in the meantime.
796 	 */
797 	if (!list_empty(&call->chan_wait_link)) {
798 		_debug("call is waiting");
799 		ASSERTCMP(call->call_id, ==, 0);
800 		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
801 		list_del_init(&call->chan_wait_link);
802 
803 		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);
804 
805 		/* We must deactivate or idle the connection if it's now
806 		 * waiting for nothing.
807 		 */
808 		spin_lock(&rxnet->client_conn_cache_lock);
809 		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
810 		    list_empty(&conn->waiting_calls) &&
811 		    !conn->active_chans)
812 			goto idle_connection;
813 		goto out;
814 	}
815 
816 	if (rcu_access_pointer(chan->call) != call) {
817 		spin_unlock(&conn->channel_lock);
818 		BUG();
819 	}
820 
821 	/* If a client call was exposed to the world, we save the result for
822 	 * retransmission.
823 	 *
824 	 * We use a barrier here so that the call number and abort code can be
825 	 * read without needing to take a lock.
826 	 *
827 	 * TODO: Make the incoming packet handler check this and handle
828 	 * terminal retransmission without requiring access to the call.
829 	 */
830 	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
831 		_debug("exposed %u,%u", call->call_id, call->abort_code);
832 		__rxrpc_disconnect_call(conn, call);
833 	}
834 
835 	/* See if we can pass the channel directly to another call. */
836 	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
837 	    !list_empty(&conn->waiting_calls)) {
838 		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
839 		rxrpc_activate_one_channel(conn, channel);
840 		goto out_2;
841 	}
842 
843 	/* Schedule the final ACK to be transmitted in a short while so that it
844 	 * can be skipped if we find a follow-on call.  The first DATA packet
845 	 * of the follow on call will implicitly ACK this call.
846 	 */
847 	if (call->completion == RXRPC_CALL_SUCCEEDED &&
848 	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
849 		unsigned long final_ack_at = jiffies + 2;
850 
851 		WRITE_ONCE(chan->final_ack_at, final_ack_at);
852 		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
853 		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
854 		rxrpc_reduce_conn_timer(conn, final_ack_at);
855 	}
856 
857 	/* Things are more complex and we need the cache lock.  We might be
858 	 * able to simply idle the conn or it might now be lurking on the wait
859 	 * list.  It might even get moved back to the active list whilst we're
860 	 * waiting for the lock.
861 	 */
862 	spin_lock(&rxnet->client_conn_cache_lock);
863 
864 	switch (conn->cache_state) {
865 	case RXRPC_CONN_CLIENT_UPGRADE:
866 		/* Deal with termination of a service upgrade probe. */
867 		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
868 			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
869 			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
870 			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
871 			rxrpc_activate_channels_locked(conn);
872 		}
873 		/* fall through */
874 	case RXRPC_CONN_CLIENT_ACTIVE:
875 		if (list_empty(&conn->waiting_calls)) {
876 			rxrpc_deactivate_one_channel(conn, channel);
877 			if (!conn->active_chans) {
878 				rxnet->nr_active_client_conns--;
879 				goto idle_connection;
880 			}
881 			goto out;
882 		}
883 
884 		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
885 		rxrpc_activate_one_channel(conn, channel);
886 		goto out;
887 
888 	case RXRPC_CONN_CLIENT_CULLED:
889 		rxrpc_deactivate_one_channel(conn, channel);
890 		ASSERT(list_empty(&conn->waiting_calls));
891 		if (!conn->active_chans)
892 			goto idle_connection;
893 		goto out;
894 
895 	case RXRPC_CONN_CLIENT_WAITING:
896 		rxrpc_deactivate_one_channel(conn, channel);
897 		goto out;
898 
899 	default:
900 		BUG();
901 	}
902 
903 out:
904 	spin_unlock(&rxnet->client_conn_cache_lock);
905 out_2:
906 	spin_unlock(&conn->channel_lock);
907 	rxrpc_put_connection(conn);
908 	_leave("");
909 	return;
910 
911 idle_connection:
912 	/* As no channels remain active, the connection gets deactivated
913 	 * immediately or moved to the idle list for a short while.
914 	 */
915 	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
916 		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
917 		conn->idle_timestamp = jiffies;
918 		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
919 		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
920 		if (rxnet->idle_client_conns.next == &conn->cache_link &&
921 		    !rxnet->kill_all_client_conns)
922 			rxrpc_set_client_reap_timer(rxnet);
923 	} else {
924 		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
925 		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
926 		list_del_init(&conn->cache_link);
927 	}
928 	goto out;
929 }
930 
931 /*
932  * Clean up a dead client connection.
933  */
934 static struct rxrpc_connection *
935 rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
936 {
937 	struct rxrpc_connection *next = NULL;
938 	struct rxrpc_local *local = conn->params.local;
939 	struct rxrpc_net *rxnet = local->rxnet;
940 	unsigned int nr_conns;
941 
942 	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
943 
944 	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
945 		spin_lock(&local->client_conns_lock);
946 		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
947 				       &conn->flags))
948 			rb_erase(&conn->client_node, &local->client_conns);
949 		spin_unlock(&local->client_conns_lock);
950 	}
951 
952 	rxrpc_put_client_connection_id(conn);
953 
954 	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);
955 
956 	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
957 		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
958 		spin_lock(&rxnet->client_conn_cache_lock);
959 		nr_conns = --rxnet->nr_client_conns;
960 
961 		if (nr_conns < rxrpc_max_client_connections &&
962 		    !list_empty(&rxnet->waiting_client_conns)) {
963 			next = list_entry(rxnet->waiting_client_conns.next,
964 					  struct rxrpc_connection, cache_link);
965 			rxrpc_get_connection(next);
966 			rxrpc_activate_conn(rxnet, next);
967 		}
968 
969 		spin_unlock(&rxnet->client_conn_cache_lock);
970 	}
971 
972 	rxrpc_kill_connection(conn);
973 	if (next)
974 		rxrpc_activate_channels(next);
975 
976	/* We need to get rid of the temporary ref we took on @next, but we
977 	 * can't call rxrpc_put_connection() recursively.
978 	 */
979 	return next;
980 }
981 
982 /*
983  * Clean up dead client connections.
984  */
985 void rxrpc_put_client_conn(struct rxrpc_connection *conn)
986 {
987 	const void *here = __builtin_return_address(0);
988 	int n;
989 
990 	do {
991 		n = atomic_dec_return(&conn->usage);
992 		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
993 		if (n > 0)
994 			return;
995 		ASSERTCMP(n, >=, 0);
996 
997 		conn = rxrpc_put_one_client_conn(conn);
998 	} while (conn);
999 }
1000 
1001 /*
1002  * Kill the longest-active client connections to make room for new ones.
1003  */
1004 static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
1005 {
1006 	struct rxrpc_connection *conn;
1007 	unsigned int nr_conns = rxnet->nr_client_conns;
1008 	unsigned int nr_active, limit;
1009 
1010 	_enter("");
1011 
1012 	ASSERTCMP(nr_conns, >=, 0);
1013 	if (nr_conns < rxrpc_max_client_connections) {
1014 		_leave(" [ok]");
1015 		return;
1016 	}
1017 	limit = rxrpc_reap_client_connections;
1018 
1019 	spin_lock(&rxnet->client_conn_cache_lock);
1020 	nr_active = rxnet->nr_active_client_conns;
1021 
1022 	while (nr_active > limit) {
1023 		ASSERT(!list_empty(&rxnet->active_client_conns));
1024 		conn = list_entry(rxnet->active_client_conns.next,
1025 				  struct rxrpc_connection, cache_link);
1026 		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
1027 			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);
1028 
1029 		if (list_empty(&conn->waiting_calls)) {
1030 			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
1031 			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
1032 			list_del_init(&conn->cache_link);
1033 		} else {
1034 			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
1035 			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
1036 			list_move_tail(&conn->cache_link,
1037 				       &rxnet->waiting_client_conns);
1038 		}
1039 
1040 		nr_active--;
1041 	}
1042 
1043 	rxnet->nr_active_client_conns = nr_active;
1044 	spin_unlock(&rxnet->client_conn_cache_lock);
1045 	ASSERTCMP(nr_active, >=, 0);
1046 	_leave(" [culled]");
1047 }
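
/* With the default tunables, culling kicks in once 1000 client conns exist
 * and walks the active list oldest-first until no more than 900 remain
 * active; a culled conn that still has waiting calls goes back on the
 * waiting queue (WAITING) instead of being moved to CULLED.
 */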
1048 
1049 /*
1050  * Discard expired client connections from the idle list.  Each conn in the
1051  * idle list has been exposed and holds an extra ref because of that.
1052  *
1053  * This may be called from conn setup or from a work item, so it must cope
1054  * with being entered concurrently (hence the trylock below).
1055  */
1056 void rxrpc_discard_expired_client_conns(struct work_struct *work)
1057 {
1058 	struct rxrpc_connection *conn;
1059 	struct rxrpc_net *rxnet =
1060 		container_of(work, struct rxrpc_net, client_conn_reaper);
1061 	unsigned long expiry, conn_expires_at, now;
1062 	unsigned int nr_conns;
1063 
1064 	_enter("");
1065 
1066 	if (list_empty(&rxnet->idle_client_conns)) {
1067 		_leave(" [empty]");
1068 		return;
1069 	}
1070 
1071 	/* Don't double up on the discarding */
1072 	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
1073 		_leave(" [already]");
1074 		return;
1075 	}
1076 
1077 	/* We keep an estimate of what the number of conns ought to be after
1078 	 * we've discarded some so that we don't overdo the discarding.
1079 	 */
1080 	nr_conns = rxnet->nr_client_conns;
1081 
1082 next:
1083 	spin_lock(&rxnet->client_conn_cache_lock);
1084 
1085 	if (list_empty(&rxnet->idle_client_conns))
1086 		goto out;
1087 
1088 	conn = list_entry(rxnet->idle_client_conns.next,
1089 			  struct rxrpc_connection, cache_link);
1090 	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
1091 
1092 	if (!rxnet->kill_all_client_conns) {
1093 		/* If the number of connections is over the reap limit, we
1094 		 * expedite discard by reducing the expiry timeout.  We must,
1095 		 * however, have at least a short grace period to be able to do
1096 		 * final-ACK or ABORT retransmission.
1097 		 */
1098 		expiry = rxrpc_conn_idle_client_expiry;
1099 		if (nr_conns > rxrpc_reap_client_connections)
1100 			expiry = rxrpc_conn_idle_client_fast_expiry;
1101 		if (conn->params.local->service_closed)
1102 			expiry = rxrpc_closed_conn_expiry * HZ;
1103 
1104 		conn_expires_at = conn->idle_timestamp + expiry;
1105 
1106 		now = READ_ONCE(jiffies);
1107 		if (time_after(conn_expires_at, now))
1108 			goto not_yet_expired;
1109 	}
1110 
1111 	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
1112 	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
1113 		BUG();
1114 	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
1115 	list_del_init(&conn->cache_link);
1116 
1117 	spin_unlock(&rxnet->client_conn_cache_lock);
1118 
1119 	/* When we cleared the EXPOSED flag, we took on responsibility for the
1120	 * reference that the flag held on the usage count.  We deal with that here.
1121 	 * If someone re-sets the flag and re-gets the ref, that's fine.
1122 	 */
1123 	rxrpc_put_connection(conn);
1124 	nr_conns--;
1125 	goto next;
1126 
1127 not_yet_expired:
1128 	/* The connection at the front of the queue hasn't yet expired, so
1129 	 * schedule the work item for that point if we discarded something.
1130 	 *
1131 	 * We don't worry if the work item is already scheduled - it can look
1132 	 * after rescheduling itself at a later time.  We could cancel it, but
1133 	 * then things get messier.
1134 	 */
1135 	_debug("not yet");
1136 	if (!rxnet->kill_all_client_conns)
1137 		timer_reduce(&rxnet->client_conn_reap_timer,
1138 			     conn_expires_at);
1139 
1140 out:
1141 	spin_unlock(&rxnet->client_conn_cache_lock);
1142 	spin_unlock(&rxnet->client_conn_discard_lock);
1143 	_leave("");
1144 }
1145 
1146 /*
1147  * Preemptively destroy all the client connection records rather than waiting
1148  * for them to time out.
1149  */
1150 void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
1151 {
1152 	_enter("");
1153 
1154 	spin_lock(&rxnet->client_conn_cache_lock);
1155 	rxnet->kill_all_client_conns = true;
1156 	spin_unlock(&rxnet->client_conn_cache_lock);
1157 
1158 	del_timer_sync(&rxnet->client_conn_reap_timer);
1159 
1160 	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
1161 		_debug("destroy: queue failed");
1162 
1163 	_leave("");
1164 }
1165