// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
}
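
/* Note on the callback above: timer_setup() stores no private cookie, so the
 * handler recovers its connection with container_of(), which subtracts the
 * offset of the embedded member.  Roughly equivalent to the (illustrative)
 * expansion:
 *
 *	conn = (struct rxrpc_connection *)
 *		((char *)timer - offsetof(struct rxrpc_connection, timer));
 */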

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their IDs are
	 * unique for this machine.
	 */
	conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port ||
		    peer->srx.transport.sin.sin_addr.s_addr !=
		    srx->transport.sin.sin_addr.s_addr)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port ||
		    memcmp(&peer->srx.transport.sin6.sin6_addr,
			   &srx->transport.sin6.sin6_addr,
			   sizeof(struct in6_addr)) != 0)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}
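
/* A minimal caller sketch (illustrative only, not a function in this file):
 * the lookup returns a pointer without taking a ref, so a caller that wants
 * to use the connection outside the RCU read-side section would pin it first
 * with the maybe-get helper below.  Here "why" stands in for whichever
 * rxrpc_conn_trace reason the caller would pass:
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_client_connection_rcu(local, srx, skb);
 *	if (conn)
 *		conn = rxrpc_get_connection_maybe(conn, why);
 *	rcu_read_unlock();
 *	if (conn) {
 *		// ... use conn ...
 *		rxrpc_put_connection(conn, why);
 *	}
 */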

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(conn->bundle, call);

	spin_lock(&conn->bundle->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->bundle->channel_lock);

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	conn->idle_timestamp = jiffies;
	if (atomic_dec_and_test(&conn->active))
		rxrpc_set_service_reap_timer(conn->rxnet,
					     jiffies + rxrpc_connection_expiry);
}

/*
 * Queue a connection's work processor, provided the connection is still
 * active (ie. its activity count hasn't been dropped to -1).
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

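	/* __refcount_inc() reports the pre-increment count through r (per the
	 * refcount_t API), hence r + 1 below is the new reference count.
	 */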
	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
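	/* timer_reduce() arms the timer if it is idle and otherwise only
	 * brings its expiry forward, so concurrent callers converge on the
	 * earliest requested reap time.
	 */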
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
	/* Drain the Rx queue.  Even though the connection has been
	 * unpublished, an incoming packet could still be in the process of
	 * being added to the Rx queue, so drain it once more before the
	 * record is freed.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}
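
/* Illustrative note on the 0 -> -1 claim in the reaper above:
 * atomic_try_cmpxchg() compares the counter with *old (here 0) and, only if
 * they match, installs the new value (-1) and returns true; on failure it
 * returns false and copies the current value back into *old.  A minimal
 * sketch of the same claim pattern:
 *
 *	int expected = 0;
 *
 *	if (atomic_try_cmpxchg(&conn->active, &expected, -1)) {
 *		// We won the race: the conn is now unavailable to new users.
 *	} else {
 *		// expected now holds the observed count (> 0: still in use).
 *	}
 */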

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}
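
/* Illustrative note on the final wait above: wait_var_event() sleeps until
 * its condition becomes true and is woken by wake_up_var() on the same
 * address, here called from rxrpc_rcu_free_connection() when the last
 * connection is freed.  A minimal sketch of the pairing, assuming a counter
 * of pending objects:
 *
 *	// Releasing side (e.g. an RCU callback):
 *	if (atomic_dec_and_test(&pending))
 *		wake_up_var(&pending);
 *
 *	// Waiting side (e.g. netns teardown):
 *	wait_var_event(&pending, !atomic_read(&pending));
 */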
473