xref: /openbmc/linux/net/rxrpc/conn_object.c (revision 73f81e5a)
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

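/*
 * Arrange for the I/O thread to attend to a connection: put it on the local
 * endpoint's attention queue, taking a ref if it wasn't already queued, and
 * wake the I/O thread.
 */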
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	struct rxrpc_local *local = conn->local;
	bool busy;

	if (WARN_ON_ONCE(!local))
		return;

	spin_lock_bh(&local->lock);
	busy = !list_empty(&conn->attend_link);
	if (!busy) {
		rxrpc_get_connection(conn, why);
		list_add_tail(&conn->attend_link, &local->conn_attend_q);
	}
	spin_unlock_bh(&local->lock);
	rxrpc_wake_up_io_thread(local);
}

static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		mutex_init(&conn->security_lock);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

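/*
 * Note that the new connection is returned with its refcount still zero: the
 * caller is expected to prime the initial count and account for the
 * connection.  A minimal sketch of the expected follow-up (cf. the client
 * and service connection constructors):
 *
 *	conn = rxrpc_alloc_connection(rxnet, GFP_KERNEL);
 *	if (!conn)
 *		return NULL;
 *	refcount_set(&conn->ref, 1);
 *	atomic_inc(&rxnet->nr_conns);
 */
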
/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}

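/*
 * A minimal sketch of the expected usage, given that no ref is taken for the
 * caller ("why" stands for a suitable rxrpc_conn_trace label):
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_client_connection_rcu(local, &srx, skb);
 *	if (conn && !rxrpc_get_connection_maybe(conn, why))
 *		conn = NULL;
 *	rcu_read_unlock();
 *	if (conn) {
 *		...use the connection...
 *		rxrpc_put_connection(conn, why);
 *	}
 */
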
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must release the call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (chan->call == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
		chan->call = NULL;
	}

	_leave("");
}

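/*
 * The last_type/last_seq/last_abort values saved above allow the final ACK
 * or ABORT for a completed call to be regenerated from the channel record
 * alone should the peer retransmit its final packet (see the connection
 * event processing in conn_event.c).
 */
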
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(call->bundle, call);
	} else {
		__rxrpc_disconnect_call(conn, call);
		conn->idle_timestamp = jiffies;
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry * HZ);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}

/*
 * Queue a connection's work processor so long as the connection hasn't been
 * deactivated, noting the connection for tracing if the work was actually
 * queued.
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}

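/*
 * rxrpc_get_connection() is for when the caller knows the refcount cannot
 * yet have reached zero (it already holds or is covered by a ref);
 * rxrpc_get_connection_maybe() is for pointers found under RCU or on a
 * shared list, where the count may already have hit zero and the connection
 * be on its way to destruction.  Every successful get must be balanced by a
 * call to rxrpc_put_connection().
 */
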
/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
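	/* timer_reduce() only ever brings the expiry forward: a reap_at later
	 * than the currently pending expiry leaves the timer untouched.
	 */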
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!conn->channels[0].call &&
	       !conn->channels[1].call &&
	       !conn->channels[2].call &&
	       !conn->channels[3].call);
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
	/* Drain the Rx queue again.  Even though the connection has been
	 * unpublished, an incoming packet could still have been in the
	 * process of being added to the queue when it was drained above, so
	 * drain it a second time here.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

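	/* Drop the count that has been priming nr_conns whilst the namespace
	 * was live so that it can fall to zero once the remaining connections
	 * are gone.
	 */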
	atomic_dec(&rxnet->nr_conns);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}
485