xref: /openbmc/linux/net/rxrpc/conn_object.c (revision 996d5b4d)
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_destroy_connection(struct rcu_head *);

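/*
 * The connection's timer has fired: queue the connection's work item so that
 * the connection processor can deal with whatever needs doing in process
 * context.
 */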
static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_queue_conn(conn);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
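/*
 * Note: the allocator above does not set conn->usage or the protocol
 * parameters; the caller that instantiates the connection (the client or
 * service connection setup path) is expected to fill those in before the
 * connection is published anywhere it can be looked up.
 */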

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

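		/* The connection was found by ID alone, so cross-check that
		 * the packet really did come from the peer this connection is
		 * bound to before trusting the match.
		 */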
		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
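
/*
 * Illustrative sketch only (not a caller from this file): because the lookup
 * above returns no reference, a caller that wants to use the connection after
 * leaving the RCU read-side section would be expected to pin it first, e.g.:
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_connection_rcu(local, skb);
 *	if (conn)
 *		conn = rxrpc_get_connection_maybe(conn);
 *	rcu_read_unlock();
 *	if (conn) {
 *		...
 *		rxrpc_put_connection(conn);
 *	}
 */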

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_USER_ABORT;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
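		/* From this point on the channel records this call ID as
		 * complete; the last_type/last_abort/last_seq saved above are
		 * what a retransmitted final packet for it would carry.
		 */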

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

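	/* Save the call's final congestion window back on the peer; later
	 * calls to the same peer can presumably start from it rather than
	 * from scratch.
	 */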
	call->peer->cong_cwnd = call->cong_cwnd;

	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

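	/* Client connections multiplex calls over a small set of channels and
	 * are managed by the client connection cache, so they take their own
	 * disconnection path.
	 */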
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

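	/* Remove the connection from the procfs listing. */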
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
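
	/* A connection whose refcount has already hit zero is being torn
	 * down; don't resurrect it by queuing more work against it.
	 */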
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_read(&conn->usage);

		trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
	}
}

/*
 * Get a ref on a connection.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&conn->usage);

	trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);

		if (n > 0)
			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
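	/* timer_reduce() only ever pulls the expiry time earlier, so a racing
	 * caller cannot accidentally push the reap further out.
	 */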
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
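	/* A count of 1 means only the ref held for the service connection
	 * list remains, i.e. the connection is now idle and may be reaped
	 * once it has sat unused for long enough.
	 */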
	if (n == 1)
		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
					     jiffies + rxrpc_connection_expiry);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	del_timer_sync(&conn->timer);
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);

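	/* If this was the last connection in the namespace, wake up anything
	 * waiting in rxrpc_destroy_all_connections() for the count to drain.
	 */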
	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
		wake_up_var(&conn->params.local->rxnet->nr_conns);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->params.local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { u=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->usage),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;
		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

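	/* With the lock dropped, dispose of the connections we culled; each
	 * one already had its usage count forced to zero above.
	 */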
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

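	/* Drop what is presumably the initial seeding of nr_conns (taken when
	 * the namespace was set up) so that the wait below can reach zero
	 * once every connection has gone.
	 */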
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}