xref: /openbmc/linux/net/rds/connection.c (revision 56a0eccd)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
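
/*
 * With RDS_CONNECTION_HASH_BITS = 12 the defines above work out to
 * 1 << 12 = 4096 buckets, so RDS_CONNECTION_HASH_MASK is 0xfff and
 * rds_conn_bucket() below simply keeps the low 12 bits of the hash.
 */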

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
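
/*
 * For example, the call made below in rds_conn_info_visitor(),
 *
 *	rds_conn_info_set(cinfo->flags,
 *			  test_bit(RDS_IN_XMIT, &conn->c_flags), SENDING);
 *
 * expands to
 *
 *	if (test_bit(RDS_IN_XMIT, &conn->c_flags))
 *		cinfo->flags |= RDS_INFO_CONNECTION_FLAG_SENDING;
 */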

/* The RCU read lock or the connection spinlock must be held. */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp, int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;
	rds_conn_net_set(conn, net);

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback than over either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_send_gen = 0;
	conn->c_outgoing = (is_outgoing ? 1 : 0);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
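
/*
 * A minimal sketch of how a caller would use the two entry points above
 * (illustrative only; "my_transport", "saddr" and "daddr" are placeholders,
 * not names from this file).  A locally initiated send path asks for an
 * "outgoing" conn so that loopback-capable transports can be substituted,
 * and must check for ERR_PTR() since creation can fail:
 *
 *	conn = rds_conn_create_outgoing(net, saddr, daddr,
 *					&my_transport, GFP_KERNEL);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *	rds_conn_connect_if_down(conn);
 *
 * A transport's incoming-connect handling would instead call
 * rds_conn_create(), which may hand back the conn that already exists for
 * the address pair.
 */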

void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
		wait_event(conn->c_waitq,
			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of
			 * tearing down the connection, and someone unloads the
			 * rds module.  Quite reproducible with loopback
			 * connections.  Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		if (conn->c_trans->t_type != RDS_TRANS_TCP ||
		    conn->c_outgoing == 1)
			rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that
 * once the conn has been shut down, no one else is referencing the
 * connection.  We can only ensure this in the rmmod path in the current
 * code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> %pI4\n", conn,
		 &conn->c_laddr, &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
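
/*
 * rds_conn_info_visitor() below is the in-tree example of such a visitor:
 * it fills the caller-supplied scratch buffer (the item_len-byte VLA above,
 * rounded up to u64 words) with one struct rds_info_connection and returns
 * nonzero so the entry is copied into the info iterator.
 */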

static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
			       rds_conn_info_visitor,
			       sizeof(struct rds_info_connection));
}

int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
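
/*
 * Taken together, the helpers in this file drive a simple per-conn state
 * machine: __rds_conn_create() starts a conn at RDS_CONN_DOWN,
 * rds_conn_connect_if_down() queues the connect worker from DOWN,
 * rds_conn_drop() forces the conn to RDS_CONN_ERROR and queues the shutdown
 * worker, and rds_conn_shutdown() moves UP or ERROR through DISCONNECTING
 * back to DOWN, optionally queueing a reconnect.  The workers themselves
 * live elsewhere in the RDS code.
 */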

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}
589