/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/addrconf.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
static atomic_t rds_ib_unloading;

module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
module_param(rds_ib_mr_8k_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
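/*
 * These parameters are read-only after load (perm 0444), so they must be
 * given at module load time.  A hypothetical example, assuming this file
 * is built into the usual rds_rdma module and using made-up values:
 *
 *	modprobe rds_rdma rds_ib_mr_8k_pool_size=4096 rds_ib_retry_count=7
 */
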
/*
 * We have a clumsy combination of RCU and an rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);

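/*
 * Connections that could not yet be tied to a device wait on
 * ib_nodev_conns; when a new device shows up we retry them all.
 */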
static void rds_ib_nodev_connect(void)
{
	struct rds_ib_connection *ic;

	spin_lock(&ib_nodev_conns_lock);
	list_for_each_entry(ic, &ib_nodev_conns, ib_node)
		rds_conn_connect_if_down(ic->conn);
	spin_unlock(&ib_nodev_conns_lock);
}

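/* Drop every connection on this device so teardown can proceed safely. */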
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_connection *ic;
	unsigned long flags;

	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
		rds_conn_path_drop(&ic->conn->c_path[0], true);
	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

/*
 * rds_ib_destroy_mr_pool() blocks on a few things and MRs drop references
 * from interrupt context, so we push the freeing off into a work struct
 * run by krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	if (rds_ibdev->mr_8k_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
	if (rds_ibdev->mr_1m_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);
	dma_pool_destroy(rds_ibdev->rid_hdrs_pool);

	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev->vector_load);

	kfree(rds_ibdev);
}

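/*
 * The final put may happen in atomic context, so the actual free (which
 * can block) is deferred to the krdsd workqueue via free_work.
 */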
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
	BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
	if (refcount_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_wq, &rds_ibdev->free_work);
}

static void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;
	bool has_fr, has_fmr;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		return;

	spin_lock_init(&rds_ibdev->spinlock);
	refcount_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);

	rds_ibdev->max_wrs = device->attrs.max_qp_wr;
	rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);

	has_fr = (device->attrs.device_cap_flags &
		  IB_DEVICE_MEM_MGT_EXTENSIONS);
	has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
		   device->ops.map_phys_fmr && device->ops.unmap_fmr);
	rds_ibdev->use_fastreg = (has_fr && !has_fmr);

	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr ?: 32;
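	/*
	 * Derive per-pool MR caps from the device's max_mr: each pool
	 * starts from half of max_mr (the 8K pool scaled up by
	 * RDS_MR_8K_SCALE) and is clamped by its module parameter; if the
	 * device reports no max_mr, the module parameter alone applies.
	 */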
	rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
		min_t(unsigned int, (device->attrs.max_mr / 2),
		      rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;

	rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
		min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
		      rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;

	rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;

	rds_ibdev->vector_load = kcalloc(device->num_comp_vectors,
					 sizeof(int),
					 GFP_KERNEL);
	if (!rds_ibdev->vector_load) {
		pr_err("RDS/IB: %s failed to allocate vector memory\n",
			__func__);
		goto put_dev;
	}

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(rds_ibdev->pd)) {
		rds_ibdev->pd = NULL;
		goto put_dev;
	}
	rds_ibdev->rid_hdrs_pool = dma_pool_create(device->name,
						   device->dma_device,
						   sizeof(struct rds_header),
						   L1_CACHE_BYTES, 0);
	if (!rds_ibdev->rid_hdrs_pool)
		goto put_dev;

	rds_ibdev->mr_1m_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
		rds_ibdev->mr_1m_pool = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_8k_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
		rds_ibdev->mr_8k_pool = NULL;
		goto put_dev;
	}

	rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
		 device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
		 rds_ibdev->max_8k_mrs);

	pr_info("RDS/IB: %s: %s supported and preferred\n",
		device->name,
		rds_ibdev->use_fastreg ? "FRMR" : "FMR");

	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
	refcount_inc(&rds_ibdev->refcount);

	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	refcount_inc(&rds_ibdev->refcount);

	rds_ib_nodev_connect();

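	/*
	 * Reached on both the success and error paths: drop the initial
	 * allocation-time reference.  On success the device stays pinned
	 * by the two references taken above (devices list, client_data).
	 */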
put_dev:
	rds_ib_dev_put(rds_ibdev);
}

/*
 * New connections use this to find the device to associate with the
 * connection.  It's not in the fast path so we're not concerned about the
 * performance of the IB call.  (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period.  The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (rds_ibdev)
		refcount_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}

/*
 * The IB stack is letting us know that a device is going away.  This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the PCI function, for example.
 *
 * This can be called at any time and can race with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
	struct rds_ib_device *rds_ibdev = client_data;

	if (!rds_ibdev)
		return;

	rds_ib_dev_shutdown(rds_ibdev);

	/* stop connection attempts from getting a reference to this device. */
	ib_set_client_data(device, &rds_ib_client, NULL);

	down_write(&rds_ib_devices_lock);
	list_del_rcu(&rds_ibdev->list);
	up_write(&rds_ib_devices_lock);

	/*
	 * This synchronize_rcu() waits for readers of both the IB
	 * client data and the devices list to finish before we drop
	 * both of those references.
	 */
	synchronize_rcu();
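	/* One put for the devices-list reference, one for client_data. */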
	rds_ib_dev_put(rds_ibdev);
	rds_ib_dev_put(rds_ibdev);
}

struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};

static int rds_ib_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;
	if (conn->c_isv6)
		return 0;

	iinfo->src_addr = conn->c_laddr.s6_addr32[3];
	iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
	if (ic) {
		iinfo->tos = conn->c_tos;
		iinfo->sl = ic->i_sl;
	}

	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;

		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
			       (union ib_gid *)&iinfo->dst_gid);

		rds_ibdev = ic->rds_ibdev;
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_ibdev->max_sge;
		rds_ib_get_mr_info(rds_ibdev, iinfo);
		iinfo->cache_allocs = atomic_read(&ic->i_cache_allocs);
	}
	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 version of rds_ib_conn_info_visitor(). */
static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
				     void *buffer)
{
	struct rds6_info_rdma_connection *iinfo6 = buffer;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;

	iinfo6->src_addr = conn->c_laddr;
	iinfo6->dst_addr = conn->c_faddr;
	if (ic) {
		iinfo6->tos = conn->c_tos;
		iinfo6->sl = ic->i_sl;
	}

	memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
	memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));

	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;

		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
			       (union ib_gid *)&iinfo6->dst_gid);
		rds_ibdev = ic->rds_ibdev;
		iinfo6->max_send_wr = ic->i_send_ring.w_nr;
		iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo6->max_send_sge = rds_ibdev->max_sge;
		rds6_ib_get_mr_info(rds_ibdev, iinfo6);
		iinfo6->cache_allocs = atomic_read(&ic->i_cache_allocs);
	}
	return 1;
}
#endif

static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
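	/*
	 * On-stack scratch space for one info record; declared as u64[] so
	 * it is 8-byte aligned, with the +7/8 rounding the byte size up to
	 * a whole number of u64 slots.
	 */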
	u64 buffer[(sizeof(struct rds_info_rdma_connection) + 7) / 8];

	rds_for_each_conn_info(sock, len, iter, lens,
				rds_ib_conn_info_visitor,
				buffer,
				sizeof(struct rds_info_rdma_connection));
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 version of rds_ib_ic_info(). */
static void rds6_ib_ic_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds6_info_rdma_connection) + 7) / 8];

	rds_for_each_conn_info(sock, len, iter, lens,
			       rds6_ib_conn_info_visitor,
			       buffer,
			       sizeof(struct rds6_info_rdma_connection));
}
#endif

/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible.  Sending and
 * receiving should be device-agnostic.  Transports would try and maintain
 * connections between peers who have messages queued.  Userspace would be
 * allowed to influence which paths have priority.  We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
			      __u32 scope_id)
{
	int ret;
	struct rdma_cm_id *cm_id;
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr_in sin;
	struct sockaddr *sa;
	bool isv4;

	isv4 = ipv6_addr_v4mapped(addr);
	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
			       NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	if (isv4) {
		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = addr->s6_addr32[3];
		sa = (struct sockaddr *)&sin;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		memset(&sin6, 0, sizeof(sin6));
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = *addr;
		sin6.sin6_scope_id = scope_id;
		sa = (struct sockaddr *)&sin6;

		/* XXX Do a special IPv6 link-local address check here.  The
		 * reason is that rdma_bind_addr() always succeeds with an
		 * IPv6 link-local address, regardless of whether it is
		 * actually configured on the system.
		 */
		if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) {
			struct net_device *dev;

			if (scope_id == 0) {
				ret = -EADDRNOTAVAIL;
				goto out;
			}

			/* Use init_net for now as RDS is not network
			 * namespace aware.
			 */
			dev = dev_get_by_index(&init_net, scope_id);
			if (!dev) {
				ret = -EADDRNOTAVAIL;
				goto out;
			}
			if (!ipv6_chk_addr(&init_net, addr, dev, 1)) {
				dev_put(dev);
				ret = -EADDRNOTAVAIL;
				goto out;
			}
			dev_put(dev);
		}
#else
		ret = -EADDRNOTAVAIL;
		goto out;
#endif
	}

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, sa);
	/* Because of this, we would claim to support iWARP devices unless
	 * we also check node_type.
	 */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI6c%%%u ret %d node type %d\n",
		 addr, scope_id, ret,
		 cm_id->device ? cm_id->device->node_type : -1);

out:
	rdma_destroy_id(cm_id);

	return ret;
}

static void rds_ib_unregister_client(void)
{
	ib_unregister_client(&rds_ib_client);
	/* wait for rds_ib_dev_free() to complete */
	flush_workqueue(rds_wq);
}

static void rds_ib_set_unloading(void)
{
	atomic_set(&rds_ib_unloading, 1);
}

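/*
 * Exposed to the RDS core via the t_unloading hook; presumably used so
 * the core can avoid re-arming paths that are being destroyed or while
 * the transport as a whole is unloading.
 */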
static bool rds_ib_is_unloading(struct rds_connection *conn)
{
	struct rds_conn_path *cp = &conn->c_path[0];

	return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
		atomic_read(&rds_ib_unloading) != 0);
}

void rds_ib_exit(void)
{
	rds_ib_set_unloading();
	synchronize_rcu();
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
#endif
	rds_ib_unregister_client();
	rds_ib_destroy_nodev_conns();
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
	rds_ib_mr_exit();
}

static u8 rds_ib_get_tos_map(u8 tos)
{
	/* 1:1 user-to-transport map for the RDMA transport.
	 * In the future, if a custom map is desired, this hook can
	 * export a user-configurable map.
	 */
	return tos;
}

struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	.xmit_path_complete	= rds_ib_xmit_path_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv_path		= rds_ib_recv_path,
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_path_connect	= rds_ib_conn_path_connect,
	.conn_path_shutdown	= rds_ib_conn_path_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.get_tos_map		= rds_ib_get_tos_map,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_unloading		= rds_ib_is_unloading,
	.t_type			= RDS_TRANS_IB
};

int rds_ib_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_ib_devices);

	ret = rds_ib_mr_init();
	if (ret)
		goto out;

	ret = ib_register_client(&rds_ib_client);
	if (ret)
		goto out_mr_exit;

	ret = rds_ib_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_ib_recv_init();
	if (ret)
		goto out_sysctl;

	rds_trans_register(&rds_ib_transport);

	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
#endif

	goto out;

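/* Error unwinding below mirrors the init order above, in reverse. */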
out_sysctl:
	rds_ib_sysctl_exit();
out_ibreg:
	rds_ib_unregister_client();
out_mr_exit:
	rds_ib_mr_exit();
out:
	return ret;
}

MODULE_LICENSE("GPL");