xref: /openbmc/linux/drivers/infiniband/core/device.c (revision 46bdf370)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
32a1d9b7fSRoland Dreier  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * This software is available to you under a choice of one of two
61da177e4SLinus Torvalds  * licenses.  You may choose to be licensed under the terms of the GNU
71da177e4SLinus Torvalds  * General Public License (GPL) Version 2, available from the file
81da177e4SLinus Torvalds  * COPYING in the main directory of this source tree, or the
91da177e4SLinus Torvalds  * OpenIB.org BSD license below:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *     Redistribution and use in source and binary forms, with or
121da177e4SLinus Torvalds  *     without modification, are permitted provided that the following
131da177e4SLinus Torvalds  *     conditions are met:
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  *      - Redistributions of source code must retain the above
161da177e4SLinus Torvalds  *        copyright notice, this list of conditions and the following
171da177e4SLinus Torvalds  *        disclaimer.
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  *      - Redistributions in binary form must reproduce the above
201da177e4SLinus Torvalds  *        copyright notice, this list of conditions and the following
211da177e4SLinus Torvalds  *        disclaimer in the documentation and/or other materials
221da177e4SLinus Torvalds  *        provided with the distribution.
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
251da177e4SLinus Torvalds  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
261da177e4SLinus Torvalds  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
271da177e4SLinus Torvalds  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
281da177e4SLinus Torvalds  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
291da177e4SLinus Torvalds  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
301da177e4SLinus Torvalds  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
311da177e4SLinus Torvalds  * SOFTWARE.
321da177e4SLinus Torvalds  */
331da177e4SLinus Torvalds 
341da177e4SLinus Torvalds #include <linux/module.h>
351da177e4SLinus Torvalds #include <linux/string.h>
361da177e4SLinus Torvalds #include <linux/errno.h>
379a6b090cSAhmed S. Darwish #include <linux/kernel.h>
381da177e4SLinus Torvalds #include <linux/slab.h>
391da177e4SLinus Torvalds #include <linux/init.h>
409268f72dSYotam Kenneth #include <linux/netdevice.h>
414e0f7b90SParav Pandit #include <net/net_namespace.h>
424e0f7b90SParav Pandit #include <net/netns/generic.h>
438f408ab6SDaniel Jurgens #include <linux/security.h>
448f408ab6SDaniel Jurgens #include <linux/notifier.h>
45324e227eSJason Gunthorpe #include <linux/hashtable.h>
46b2cbae2cSRoland Dreier #include <rdma/rdma_netlink.h>
4703db3a2dSMatan Barak #include <rdma/ib_addr.h>
4803db3a2dSMatan Barak #include <rdma/ib_cache.h>
491da177e4SLinus Torvalds 
501da177e4SLinus Torvalds #include "core_priv.h"
5141eda65cSLeon Romanovsky #include "restrack.h"
521da177e4SLinus Torvalds 
531da177e4SLinus Torvalds MODULE_AUTHOR("Roland Dreier");
541da177e4SLinus Torvalds MODULE_DESCRIPTION("core kernel InfiniBand API");
551da177e4SLinus Torvalds MODULE_LICENSE("Dual BSD/GPL");
561da177e4SLinus Torvalds 
5714d3a3b2SChristoph Hellwig struct workqueue_struct *ib_comp_wq;
58f794809aSJack Morgenstein struct workqueue_struct *ib_comp_unbound_wq;
59f0626710STejun Heo struct workqueue_struct *ib_wq;
60f0626710STejun Heo EXPORT_SYMBOL_GPL(ib_wq);
61f0626710STejun Heo 
620df91bb6SJason Gunthorpe /*
63921eab11SJason Gunthorpe  * Each of the three rwsem locks (devices, clients, client_data) protects the
64921eab11SJason Gunthorpe  * xarray of the same name. Specifically it allows the caller to assert that
65921eab11SJason Gunthorpe  * the MARK will/will not be changing under the lock, and for devices and
66921eab11SJason Gunthorpe  * clients, that the value in the xarray is still a valid pointer. Change of
67921eab11SJason Gunthorpe  * the MARK is linked to the object state, so holding the lock and testing the
68921eab11SJason Gunthorpe  * MARK also asserts that the contained object is in a certain state.
69921eab11SJason Gunthorpe  *
70921eab11SJason Gunthorpe  * This is used to build a two stage register/unregister flow where objects
71921eab11SJason Gunthorpe  * can continue to be in the xarray even though they are still in progress to
72921eab11SJason Gunthorpe  * register/unregister.
73921eab11SJason Gunthorpe  *
74921eab11SJason Gunthorpe  * The xarray itself provides additional locking, and restartable iteration,
75921eab11SJason Gunthorpe  * which is also relied on.
76921eab11SJason Gunthorpe  *
77921eab11SJason Gunthorpe  * Locks should not be nested, with the exception of client_data, which is
78921eab11SJason Gunthorpe  * allowed to nest under the read side of the other two locks.
79921eab11SJason Gunthorpe  *
80921eab11SJason Gunthorpe  * The devices_rwsem also protects the device name list, any change or
81921eab11SJason Gunthorpe  * assignment of device name must also hold the write side to guarantee unique
82921eab11SJason Gunthorpe  * names.
83921eab11SJason Gunthorpe  */
84921eab11SJason Gunthorpe 
85921eab11SJason Gunthorpe /*
860df91bb6SJason Gunthorpe  * devices contains devices that have had their names assigned. The
870df91bb6SJason Gunthorpe  * devices may not be registered. Users that care about the registration
880df91bb6SJason Gunthorpe  * status need to call ib_device_try_get() on the device to ensure it is
890df91bb6SJason Gunthorpe  * registered, and keep it registered, for the required duration.
900df91bb6SJason Gunthorpe  *
910df91bb6SJason Gunthorpe  */
920df91bb6SJason Gunthorpe static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
93921eab11SJason Gunthorpe static DECLARE_RWSEM(devices_rwsem);
940df91bb6SJason Gunthorpe #define DEVICE_REGISTERED XA_MARK_1
950df91bb6SJason Gunthorpe 
961da177e4SLinus Torvalds static LIST_HEAD(client_list);
97e59178d8SJason Gunthorpe #define CLIENT_REGISTERED XA_MARK_1
98e59178d8SJason Gunthorpe static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
99921eab11SJason Gunthorpe static DECLARE_RWSEM(clients_rwsem);
1001da177e4SLinus Torvalds 
1011da177e4SLinus Torvalds /*
1020df91bb6SJason Gunthorpe  * If client_data is registered then the corresponding client must also still
1030df91bb6SJason Gunthorpe  * be registered.
1040df91bb6SJason Gunthorpe  */
1050df91bb6SJason Gunthorpe #define CLIENT_DATA_REGISTERED XA_MARK_1
1064e0f7b90SParav Pandit 
1074e0f7b90SParav Pandit /**
1084e0f7b90SParav Pandit  * struct rdma_dev_net - rdma net namespace metadata for a net
1094e0f7b90SParav Pandit  * @net:	Pointer to owner net namespace
1104e0f7b90SParav Pandit  * @id:		xarray id to identify the net namespace.
1114e0f7b90SParav Pandit  */
1124e0f7b90SParav Pandit struct rdma_dev_net {
1134e0f7b90SParav Pandit 	possible_net_t net;
1144e0f7b90SParav Pandit 	u32 id;
1154e0f7b90SParav Pandit };
1164e0f7b90SParav Pandit 
1174e0f7b90SParav Pandit static unsigned int rdma_dev_net_id;
1184e0f7b90SParav Pandit 
1194e0f7b90SParav Pandit /*
1204e0f7b90SParav Pandit  * A list of net namespaces is maintained in an xarray. This is necessary
1214e0f7b90SParav Pandit  * because we can't get the locking right using the existing net ns list. We
1224e0f7b90SParav Pandit  * would require a init_net callback after the list is updated.
1234e0f7b90SParav Pandit  */
1244e0f7b90SParav Pandit static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
1254e0f7b90SParav Pandit /*
1264e0f7b90SParav Pandit  * rwsem to protect accessing the rdma_nets xarray entries.
1274e0f7b90SParav Pandit  */
1284e0f7b90SParav Pandit static DECLARE_RWSEM(rdma_nets_rwsem);
1294e0f7b90SParav Pandit 
130cb7e0e13SParav Pandit bool ib_devices_shared_netns = true;
131a56bc45bSParav Pandit module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
132a56bc45bSParav Pandit MODULE_PARM_DESC(netns_mode,
133a56bc45bSParav Pandit 		 "Share device among net namespaces; default=1 (shared)");
13441c61401SParav Pandit /**
13541c61401SParav Pandit  * rdma_dev_access_netns() - Return whether a rdma device can be accessed
13641c61401SParav Pandit  *			     from a specified net namespace or not.
13741c61401SParav Pandit  * @device:	Pointer to rdma device which needs to be checked
13841c61401SParav Pandit  * @net:	Pointer to net namesapce for which access to be checked
13941c61401SParav Pandit  *
14041c61401SParav Pandit  * rdma_dev_access_netns() - Return whether a rdma device can be accessed
14141c61401SParav Pandit  *			     from a specified net namespace or not. When
14241c61401SParav Pandit  *			     rdma device is in shared mode, it ignores the
14341c61401SParav Pandit  *			     net namespace. When rdma device is exclusive
14441c61401SParav Pandit  *			     to a net namespace, rdma device net namespace is
14541c61401SParav Pandit  *			     checked against the specified one.
14641c61401SParav Pandit  */
14741c61401SParav Pandit bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
14841c61401SParav Pandit {
14941c61401SParav Pandit 	return (ib_devices_shared_netns ||
15041c61401SParav Pandit 		net_eq(read_pnet(&dev->coredev.rdma_net), net));
15141c61401SParav Pandit }
15241c61401SParav Pandit EXPORT_SYMBOL(rdma_dev_access_netns);
15341c61401SParav Pandit 
1540df91bb6SJason Gunthorpe /*
1550df91bb6SJason Gunthorpe  * xarray has this behavior where it won't iterate over NULL values stored in
1560df91bb6SJason Gunthorpe  * allocated arrays.  So we need our own iterator to see all values stored in
1570df91bb6SJason Gunthorpe  * the array. This does the same thing as xa_for_each except that it also
1580df91bb6SJason Gunthorpe  * returns NULL valued entries if the array is allocating. Simplified to only
1590df91bb6SJason Gunthorpe  * work on simple xarrays.
1600df91bb6SJason Gunthorpe  */
1610df91bb6SJason Gunthorpe static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
1620df91bb6SJason Gunthorpe 			     xa_mark_t filter)
1630df91bb6SJason Gunthorpe {
1640df91bb6SJason Gunthorpe 	XA_STATE(xas, xa, *indexp);
1650df91bb6SJason Gunthorpe 	void *entry;
1660df91bb6SJason Gunthorpe 
1670df91bb6SJason Gunthorpe 	rcu_read_lock();
1680df91bb6SJason Gunthorpe 	do {
1690df91bb6SJason Gunthorpe 		entry = xas_find_marked(&xas, ULONG_MAX, filter);
1700df91bb6SJason Gunthorpe 		if (xa_is_zero(entry))
1710df91bb6SJason Gunthorpe 			break;
1720df91bb6SJason Gunthorpe 	} while (xas_retry(&xas, entry));
1730df91bb6SJason Gunthorpe 	rcu_read_unlock();
1740df91bb6SJason Gunthorpe 
1750df91bb6SJason Gunthorpe 	if (entry) {
1760df91bb6SJason Gunthorpe 		*indexp = xas.xa_index;
1770df91bb6SJason Gunthorpe 		if (xa_is_zero(entry))
1780df91bb6SJason Gunthorpe 			return NULL;
1790df91bb6SJason Gunthorpe 		return entry;
1800df91bb6SJason Gunthorpe 	}
1810df91bb6SJason Gunthorpe 	return XA_ERROR(-ENOENT);
1820df91bb6SJason Gunthorpe }
1830df91bb6SJason Gunthorpe #define xan_for_each_marked(xa, index, entry, filter)                          \
1840df91bb6SJason Gunthorpe 	for (index = 0, entry = xan_find_marked(xa, &(index), filter);         \
1850df91bb6SJason Gunthorpe 	     !xa_is_err(entry);                                                \
1860df91bb6SJason Gunthorpe 	     (index)++, entry = xan_find_marked(xa, &(index), filter))
1870df91bb6SJason Gunthorpe 
188324e227eSJason Gunthorpe /* RCU hash table mapping netdevice pointers to struct ib_port_data */
189324e227eSJason Gunthorpe static DEFINE_SPINLOCK(ndev_hash_lock);
190324e227eSJason Gunthorpe static DECLARE_HASHTABLE(ndev_hash, 5);
191324e227eSJason Gunthorpe 
192c2261dd7SJason Gunthorpe static void free_netdevs(struct ib_device *ib_dev);
193d0899892SJason Gunthorpe static void ib_unregister_work(struct work_struct *work);
194d0899892SJason Gunthorpe static void __ib_unregister_device(struct ib_device *device);
1958f408ab6SDaniel Jurgens static int ib_security_change(struct notifier_block *nb, unsigned long event,
1968f408ab6SDaniel Jurgens 			      void *lsm_data);
1978f408ab6SDaniel Jurgens static void ib_policy_change_task(struct work_struct *work);
1988f408ab6SDaniel Jurgens static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
1998f408ab6SDaniel Jurgens 
200923abb9dSGal Pressman static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
201923abb9dSGal Pressman 			   struct va_format *vaf)
202923abb9dSGal Pressman {
203923abb9dSGal Pressman 	if (ibdev && ibdev->dev.parent)
204923abb9dSGal Pressman 		dev_printk_emit(level[1] - '0',
205923abb9dSGal Pressman 				ibdev->dev.parent,
206923abb9dSGal Pressman 				"%s %s %s: %pV",
207923abb9dSGal Pressman 				dev_driver_string(ibdev->dev.parent),
208923abb9dSGal Pressman 				dev_name(ibdev->dev.parent),
209923abb9dSGal Pressman 				dev_name(&ibdev->dev),
210923abb9dSGal Pressman 				vaf);
211923abb9dSGal Pressman 	else if (ibdev)
212923abb9dSGal Pressman 		printk("%s%s: %pV",
213923abb9dSGal Pressman 		       level, dev_name(&ibdev->dev), vaf);
214923abb9dSGal Pressman 	else
215923abb9dSGal Pressman 		printk("%s(NULL ib_device): %pV", level, vaf);
216923abb9dSGal Pressman }
217923abb9dSGal Pressman 
218923abb9dSGal Pressman void ibdev_printk(const char *level, const struct ib_device *ibdev,
219923abb9dSGal Pressman 		  const char *format, ...)
220923abb9dSGal Pressman {
221923abb9dSGal Pressman 	struct va_format vaf;
222923abb9dSGal Pressman 	va_list args;
223923abb9dSGal Pressman 
224923abb9dSGal Pressman 	va_start(args, format);
225923abb9dSGal Pressman 
226923abb9dSGal Pressman 	vaf.fmt = format;
227923abb9dSGal Pressman 	vaf.va = &args;
228923abb9dSGal Pressman 
229923abb9dSGal Pressman 	__ibdev_printk(level, ibdev, &vaf);
230923abb9dSGal Pressman 
231923abb9dSGal Pressman 	va_end(args);
232923abb9dSGal Pressman }
233923abb9dSGal Pressman EXPORT_SYMBOL(ibdev_printk);
234923abb9dSGal Pressman 
235923abb9dSGal Pressman #define define_ibdev_printk_level(func, level)                  \
236923abb9dSGal Pressman void func(const struct ib_device *ibdev, const char *fmt, ...)  \
237923abb9dSGal Pressman {                                                               \
238923abb9dSGal Pressman 	struct va_format vaf;                                   \
239923abb9dSGal Pressman 	va_list args;                                           \
240923abb9dSGal Pressman 								\
241923abb9dSGal Pressman 	va_start(args, fmt);                                    \
242923abb9dSGal Pressman 								\
243923abb9dSGal Pressman 	vaf.fmt = fmt;                                          \
244923abb9dSGal Pressman 	vaf.va = &args;                                         \
245923abb9dSGal Pressman 								\
246923abb9dSGal Pressman 	__ibdev_printk(level, ibdev, &vaf);                     \
247923abb9dSGal Pressman 								\
248923abb9dSGal Pressman 	va_end(args);                                           \
249923abb9dSGal Pressman }                                                               \
250923abb9dSGal Pressman EXPORT_SYMBOL(func);
251923abb9dSGal Pressman 
252923abb9dSGal Pressman define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
253923abb9dSGal Pressman define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
254923abb9dSGal Pressman define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
255923abb9dSGal Pressman define_ibdev_printk_level(ibdev_err, KERN_ERR);
256923abb9dSGal Pressman define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
257923abb9dSGal Pressman define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
258923abb9dSGal Pressman define_ibdev_printk_level(ibdev_info, KERN_INFO);
259923abb9dSGal Pressman 
2608f408ab6SDaniel Jurgens static struct notifier_block ibdev_lsm_nb = {
2618f408ab6SDaniel Jurgens 	.notifier_call = ib_security_change,
2628f408ab6SDaniel Jurgens };
2631da177e4SLinus Torvalds 
264decbc7a6SParav Pandit static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
265decbc7a6SParav Pandit 				 struct net *net);
266decbc7a6SParav Pandit 
267324e227eSJason Gunthorpe /* Pointer to the RCU head at the start of the ib_port_data array */
268324e227eSJason Gunthorpe struct ib_port_data_rcu {
269324e227eSJason Gunthorpe 	struct rcu_head rcu_head;
270324e227eSJason Gunthorpe 	struct ib_port_data pdata[];
271324e227eSJason Gunthorpe };
272324e227eSJason Gunthorpe 
2731da177e4SLinus Torvalds static int ib_device_check_mandatory(struct ib_device *device)
2741da177e4SLinus Torvalds {
2753023a1e9SKamal Heib #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
2761da177e4SLinus Torvalds 	static const struct {
2771da177e4SLinus Torvalds 		size_t offset;
2781da177e4SLinus Torvalds 		char  *name;
2791da177e4SLinus Torvalds 	} mandatory_table[] = {
2801da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(query_device),
2811da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(query_port),
2821da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(query_pkey),
2831da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(alloc_pd),
2841da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(dealloc_pd),
2851da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(create_qp),
2861da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(modify_qp),
2871da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(destroy_qp),
2881da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(post_send),
2891da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(post_recv),
2901da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(create_cq),
2911da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(destroy_cq),
2921da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(poll_cq),
2931da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(req_notify_cq),
2941da177e4SLinus Torvalds 		IB_MANDATORY_FUNC(get_dma_mr),
2957738613eSIra Weiny 		IB_MANDATORY_FUNC(dereg_mr),
2967738613eSIra Weiny 		IB_MANDATORY_FUNC(get_port_immutable)
2971da177e4SLinus Torvalds 	};
2981da177e4SLinus Torvalds 	int i;
2991da177e4SLinus Torvalds 
3006780c4faSGal Pressman 	device->kverbs_provider = true;
3019a6b090cSAhmed S. Darwish 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
3023023a1e9SKamal Heib 		if (!*(void **) ((void *) &device->ops +
3033023a1e9SKamal Heib 				 mandatory_table[i].offset)) {
3046780c4faSGal Pressman 			device->kverbs_provider = false;
3056780c4faSGal Pressman 			break;
3061da177e4SLinus Torvalds 		}
3071da177e4SLinus Torvalds 	}
3081da177e4SLinus Torvalds 
3091da177e4SLinus Torvalds 	return 0;
3101da177e4SLinus Torvalds }
3111da177e4SLinus Torvalds 
312f8978bd9SLeon Romanovsky /*
31301b67117SParav Pandit  * Caller must perform ib_device_put() to return the device reference count
31401b67117SParav Pandit  * when ib_device_get_by_index() returns valid device pointer.
315f8978bd9SLeon Romanovsky  */
31637eeab55SParav Pandit struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
317f8978bd9SLeon Romanovsky {
318f8978bd9SLeon Romanovsky 	struct ib_device *device;
319f8978bd9SLeon Romanovsky 
320921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
3210df91bb6SJason Gunthorpe 	device = xa_load(&devices, index);
32201b67117SParav Pandit 	if (device) {
32337eeab55SParav Pandit 		if (!rdma_dev_access_netns(device, net)) {
32437eeab55SParav Pandit 			device = NULL;
32537eeab55SParav Pandit 			goto out;
32637eeab55SParav Pandit 		}
32737eeab55SParav Pandit 
328d79af724SJason Gunthorpe 		if (!ib_device_try_get(device))
32901b67117SParav Pandit 			device = NULL;
33001b67117SParav Pandit 	}
33137eeab55SParav Pandit out:
332921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
333f8978bd9SLeon Romanovsky 	return device;
334f8978bd9SLeon Romanovsky }
335f8978bd9SLeon Romanovsky 
336d79af724SJason Gunthorpe /**
337d79af724SJason Gunthorpe  * ib_device_put - Release IB device reference
338d79af724SJason Gunthorpe  * @device: device whose reference to be released
339d79af724SJason Gunthorpe  *
340d79af724SJason Gunthorpe  * ib_device_put() releases reference to the IB device to allow it to be
341d79af724SJason Gunthorpe  * unregistered and eventually free.
342d79af724SJason Gunthorpe  */
34301b67117SParav Pandit void ib_device_put(struct ib_device *device)
34401b67117SParav Pandit {
34501b67117SParav Pandit 	if (refcount_dec_and_test(&device->refcount))
34601b67117SParav Pandit 		complete(&device->unreg_completion);
34701b67117SParav Pandit }
348d79af724SJason Gunthorpe EXPORT_SYMBOL(ib_device_put);
34901b67117SParav Pandit 
3501da177e4SLinus Torvalds static struct ib_device *__ib_device_get_by_name(const char *name)
3511da177e4SLinus Torvalds {
3521da177e4SLinus Torvalds 	struct ib_device *device;
3530df91bb6SJason Gunthorpe 	unsigned long index;
3541da177e4SLinus Torvalds 
3550df91bb6SJason Gunthorpe 	xa_for_each (&devices, index, device)
356896de009SJason Gunthorpe 		if (!strcmp(name, dev_name(&device->dev)))
3571da177e4SLinus Torvalds 			return device;
3581da177e4SLinus Torvalds 
3591da177e4SLinus Torvalds 	return NULL;
3601da177e4SLinus Torvalds }
3611da177e4SLinus Torvalds 
3626cc2c8e5SJason Gunthorpe /**
3636cc2c8e5SJason Gunthorpe  * ib_device_get_by_name - Find an IB device by name
3646cc2c8e5SJason Gunthorpe  * @name: The name to look for
3656cc2c8e5SJason Gunthorpe  * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
3666cc2c8e5SJason Gunthorpe  *
3676cc2c8e5SJason Gunthorpe  * Find and hold an ib_device by its name. The caller must call
3686cc2c8e5SJason Gunthorpe  * ib_device_put() on the returned pointer.
3696cc2c8e5SJason Gunthorpe  */
3706cc2c8e5SJason Gunthorpe struct ib_device *ib_device_get_by_name(const char *name,
3716cc2c8e5SJason Gunthorpe 					enum rdma_driver_id driver_id)
3726cc2c8e5SJason Gunthorpe {
3736cc2c8e5SJason Gunthorpe 	struct ib_device *device;
3746cc2c8e5SJason Gunthorpe 
3756cc2c8e5SJason Gunthorpe 	down_read(&devices_rwsem);
3766cc2c8e5SJason Gunthorpe 	device = __ib_device_get_by_name(name);
3776cc2c8e5SJason Gunthorpe 	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
3786cc2c8e5SJason Gunthorpe 	    device->driver_id != driver_id)
3796cc2c8e5SJason Gunthorpe 		device = NULL;
3806cc2c8e5SJason Gunthorpe 
3816cc2c8e5SJason Gunthorpe 	if (device) {
3826cc2c8e5SJason Gunthorpe 		if (!ib_device_try_get(device))
3836cc2c8e5SJason Gunthorpe 			device = NULL;
3846cc2c8e5SJason Gunthorpe 	}
3856cc2c8e5SJason Gunthorpe 	up_read(&devices_rwsem);
3866cc2c8e5SJason Gunthorpe 	return device;
3876cc2c8e5SJason Gunthorpe }
3886cc2c8e5SJason Gunthorpe EXPORT_SYMBOL(ib_device_get_by_name);
3896cc2c8e5SJason Gunthorpe 
3904e0f7b90SParav Pandit static int rename_compat_devs(struct ib_device *device)
3914e0f7b90SParav Pandit {
3924e0f7b90SParav Pandit 	struct ib_core_device *cdev;
3934e0f7b90SParav Pandit 	unsigned long index;
3944e0f7b90SParav Pandit 	int ret = 0;
3954e0f7b90SParav Pandit 
3964e0f7b90SParav Pandit 	mutex_lock(&device->compat_devs_mutex);
3974e0f7b90SParav Pandit 	xa_for_each (&device->compat_devs, index, cdev) {
3984e0f7b90SParav Pandit 		ret = device_rename(&cdev->dev, dev_name(&device->dev));
3994e0f7b90SParav Pandit 		if (ret) {
4004e0f7b90SParav Pandit 			dev_warn(&cdev->dev,
4014e0f7b90SParav Pandit 				 "Fail to rename compatdev to new name %s\n",
4024e0f7b90SParav Pandit 				 dev_name(&device->dev));
4034e0f7b90SParav Pandit 			break;
4044e0f7b90SParav Pandit 		}
4054e0f7b90SParav Pandit 	}
4064e0f7b90SParav Pandit 	mutex_unlock(&device->compat_devs_mutex);
4074e0f7b90SParav Pandit 	return ret;
4084e0f7b90SParav Pandit }
4094e0f7b90SParav Pandit 
410d21943ddSLeon Romanovsky int ib_device_rename(struct ib_device *ibdev, const char *name)
411d21943ddSLeon Romanovsky {
412dc1435c0SLeon Romanovsky 	unsigned long index;
413dc1435c0SLeon Romanovsky 	void *client_data;
414e3593b56SJason Gunthorpe 	int ret;
415d21943ddSLeon Romanovsky 
416921eab11SJason Gunthorpe 	down_write(&devices_rwsem);
417e3593b56SJason Gunthorpe 	if (!strcmp(name, dev_name(&ibdev->dev))) {
418dc1435c0SLeon Romanovsky 		up_write(&devices_rwsem);
419dc1435c0SLeon Romanovsky 		return 0;
420e3593b56SJason Gunthorpe 	}
421e3593b56SJason Gunthorpe 
422344684e6SJason Gunthorpe 	if (__ib_device_get_by_name(name)) {
423dc1435c0SLeon Romanovsky 		up_write(&devices_rwsem);
424dc1435c0SLeon Romanovsky 		return -EEXIST;
425d21943ddSLeon Romanovsky 	}
426d21943ddSLeon Romanovsky 
427d21943ddSLeon Romanovsky 	ret = device_rename(&ibdev->dev, name);
428dc1435c0SLeon Romanovsky 	if (ret) {
429921eab11SJason Gunthorpe 		up_write(&devices_rwsem);
430d21943ddSLeon Romanovsky 		return ret;
431d21943ddSLeon Romanovsky 	}
432d21943ddSLeon Romanovsky 
433dc1435c0SLeon Romanovsky 	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
434dc1435c0SLeon Romanovsky 	ret = rename_compat_devs(ibdev);
435dc1435c0SLeon Romanovsky 
436dc1435c0SLeon Romanovsky 	downgrade_write(&devices_rwsem);
437dc1435c0SLeon Romanovsky 	down_read(&ibdev->client_data_rwsem);
438dc1435c0SLeon Romanovsky 	xan_for_each_marked(&ibdev->client_data, index, client_data,
439dc1435c0SLeon Romanovsky 			    CLIENT_DATA_REGISTERED) {
440dc1435c0SLeon Romanovsky 		struct ib_client *client = xa_load(&clients, index);
441dc1435c0SLeon Romanovsky 
442dc1435c0SLeon Romanovsky 		if (!client || !client->rename)
443dc1435c0SLeon Romanovsky 			continue;
444dc1435c0SLeon Romanovsky 
445dc1435c0SLeon Romanovsky 		client->rename(ibdev, client_data);
446dc1435c0SLeon Romanovsky 	}
447dc1435c0SLeon Romanovsky 	up_read(&ibdev->client_data_rwsem);
448dc1435c0SLeon Romanovsky 	up_read(&devices_rwsem);
449dc1435c0SLeon Romanovsky 	return 0;
450dc1435c0SLeon Romanovsky }
451dc1435c0SLeon Romanovsky 
452e349f858SJason Gunthorpe static int alloc_name(struct ib_device *ibdev, const char *name)
4531da177e4SLinus Torvalds {
4541da177e4SLinus Torvalds 	struct ib_device *device;
4550df91bb6SJason Gunthorpe 	unsigned long index;
4563b88afd3SJason Gunthorpe 	struct ida inuse;
4573b88afd3SJason Gunthorpe 	int rc;
4581da177e4SLinus Torvalds 	int i;
4591da177e4SLinus Torvalds 
460921eab11SJason Gunthorpe 	lockdep_assert_held_exclusive(&devices_rwsem);
4613b88afd3SJason Gunthorpe 	ida_init(&inuse);
4620df91bb6SJason Gunthorpe 	xa_for_each (&devices, index, device) {
463e349f858SJason Gunthorpe 		char buf[IB_DEVICE_NAME_MAX];
464e349f858SJason Gunthorpe 
465896de009SJason Gunthorpe 		if (sscanf(dev_name(&device->dev), name, &i) != 1)
4661da177e4SLinus Torvalds 			continue;
4673b88afd3SJason Gunthorpe 		if (i < 0 || i >= INT_MAX)
4681da177e4SLinus Torvalds 			continue;
4691da177e4SLinus Torvalds 		snprintf(buf, sizeof buf, name, i);
4703b88afd3SJason Gunthorpe 		if (strcmp(buf, dev_name(&device->dev)) != 0)
4713b88afd3SJason Gunthorpe 			continue;
4723b88afd3SJason Gunthorpe 
4733b88afd3SJason Gunthorpe 		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
4743b88afd3SJason Gunthorpe 		if (rc < 0)
4753b88afd3SJason Gunthorpe 			goto out;
4761da177e4SLinus Torvalds 	}
4771da177e4SLinus Torvalds 
4783b88afd3SJason Gunthorpe 	rc = ida_alloc(&inuse, GFP_KERNEL);
4793b88afd3SJason Gunthorpe 	if (rc < 0)
4803b88afd3SJason Gunthorpe 		goto out;
4811da177e4SLinus Torvalds 
4823b88afd3SJason Gunthorpe 	rc = dev_set_name(&ibdev->dev, name, rc);
4833b88afd3SJason Gunthorpe out:
4843b88afd3SJason Gunthorpe 	ida_destroy(&inuse);
4853b88afd3SJason Gunthorpe 	return rc;
4861da177e4SLinus Torvalds }
4871da177e4SLinus Torvalds 
48855aeed06SJason Gunthorpe static void ib_device_release(struct device *device)
48955aeed06SJason Gunthorpe {
49055aeed06SJason Gunthorpe 	struct ib_device *dev = container_of(device, struct ib_device, dev);
49155aeed06SJason Gunthorpe 
492c2261dd7SJason Gunthorpe 	free_netdevs(dev);
493652432f3SJason Gunthorpe 	WARN_ON(refcount_read(&dev->refcount));
49446bdf370SKamal Heib 	if (dev->port_data) {
49503db3a2dSMatan Barak 		ib_cache_release_one(dev);
496b34b269aSJason Gunthorpe 		ib_security_release_port_pkey_list(dev);
497324e227eSJason Gunthorpe 		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
498324e227eSJason Gunthorpe 				       pdata[0]),
499324e227eSJason Gunthorpe 			  rcu_head);
50046bdf370SKamal Heib 	}
50146bdf370SKamal Heib 	xa_destroy(&dev->compat_devs);
50246bdf370SKamal Heib 	xa_destroy(&dev->client_data);
503324e227eSJason Gunthorpe 	kfree_rcu(dev, rcu_head);
50455aeed06SJason Gunthorpe }
50555aeed06SJason Gunthorpe 
50655aeed06SJason Gunthorpe static int ib_device_uevent(struct device *device,
50755aeed06SJason Gunthorpe 			    struct kobj_uevent_env *env)
50855aeed06SJason Gunthorpe {
509896de009SJason Gunthorpe 	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
51055aeed06SJason Gunthorpe 		return -ENOMEM;
51155aeed06SJason Gunthorpe 
51255aeed06SJason Gunthorpe 	/*
51355aeed06SJason Gunthorpe 	 * It would be nice to pass the node GUID with the event...
51455aeed06SJason Gunthorpe 	 */
51555aeed06SJason Gunthorpe 
51655aeed06SJason Gunthorpe 	return 0;
51755aeed06SJason Gunthorpe }
51855aeed06SJason Gunthorpe 
51962dfa795SParav Pandit static const void *net_namespace(struct device *d)
52062dfa795SParav Pandit {
5214e0f7b90SParav Pandit 	struct ib_core_device *coredev =
5224e0f7b90SParav Pandit 			container_of(d, struct ib_core_device, dev);
5234e0f7b90SParav Pandit 
5244e0f7b90SParav Pandit 	return read_pnet(&coredev->rdma_net);
52562dfa795SParav Pandit }
52662dfa795SParav Pandit 
52755aeed06SJason Gunthorpe static struct class ib_class = {
52855aeed06SJason Gunthorpe 	.name    = "infiniband",
52955aeed06SJason Gunthorpe 	.dev_release = ib_device_release,
53055aeed06SJason Gunthorpe 	.dev_uevent = ib_device_uevent,
53162dfa795SParav Pandit 	.ns_type = &net_ns_type_operations,
53262dfa795SParav Pandit 	.namespace = net_namespace,
53355aeed06SJason Gunthorpe };
53455aeed06SJason Gunthorpe 
535cebe556bSParav Pandit static void rdma_init_coredev(struct ib_core_device *coredev,
5364e0f7b90SParav Pandit 			      struct ib_device *dev, struct net *net)
537cebe556bSParav Pandit {
538cebe556bSParav Pandit 	/* This BUILD_BUG_ON is intended to catch layout change
539cebe556bSParav Pandit 	 * of union of ib_core_device and device.
540cebe556bSParav Pandit 	 * dev must be the first element as ib_core and providers
541cebe556bSParav Pandit 	 * driver uses it. Adding anything in ib_core_device before
542cebe556bSParav Pandit 	 * device will break this assumption.
543cebe556bSParav Pandit 	 */
544cebe556bSParav Pandit 	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
545cebe556bSParav Pandit 		     offsetof(struct ib_device, dev));
546cebe556bSParav Pandit 
547cebe556bSParav Pandit 	coredev->dev.class = &ib_class;
548cebe556bSParav Pandit 	coredev->dev.groups = dev->groups;
549cebe556bSParav Pandit 	device_initialize(&coredev->dev);
550cebe556bSParav Pandit 	coredev->owner = dev;
551cebe556bSParav Pandit 	INIT_LIST_HEAD(&coredev->port_list);
5524e0f7b90SParav Pandit 	write_pnet(&coredev->rdma_net, net);
553cebe556bSParav Pandit }
554cebe556bSParav Pandit 
5551da177e4SLinus Torvalds /**
556459cc69fSLeon Romanovsky  * _ib_alloc_device - allocate an IB device struct
5571da177e4SLinus Torvalds  * @size:size of structure to allocate
5581da177e4SLinus Torvalds  *
5591da177e4SLinus Torvalds  * Low-level drivers should use ib_alloc_device() to allocate &struct
5601da177e4SLinus Torvalds  * ib_device.  @size is the size of the structure to be allocated,
5611da177e4SLinus Torvalds  * including any private data used by the low-level driver.
5621da177e4SLinus Torvalds  * ib_dealloc_device() must be used to free structures allocated with
5631da177e4SLinus Torvalds  * ib_alloc_device().
5641da177e4SLinus Torvalds  */
565459cc69fSLeon Romanovsky struct ib_device *_ib_alloc_device(size_t size)
5661da177e4SLinus Torvalds {
56755aeed06SJason Gunthorpe 	struct ib_device *device;
5681da177e4SLinus Torvalds 
56955aeed06SJason Gunthorpe 	if (WARN_ON(size < sizeof(struct ib_device)))
57055aeed06SJason Gunthorpe 		return NULL;
57155aeed06SJason Gunthorpe 
57255aeed06SJason Gunthorpe 	device = kzalloc(size, GFP_KERNEL);
57355aeed06SJason Gunthorpe 	if (!device)
57455aeed06SJason Gunthorpe 		return NULL;
57555aeed06SJason Gunthorpe 
57641eda65cSLeon Romanovsky 	if (rdma_restrack_init(device)) {
57741eda65cSLeon Romanovsky 		kfree(device);
57841eda65cSLeon Romanovsky 		return NULL;
57941eda65cSLeon Romanovsky 	}
58002d8883fSLeon Romanovsky 
5815f8f5499SParav Pandit 	device->groups[0] = &ib_dev_attr_group;
5824e0f7b90SParav Pandit 	rdma_init_coredev(&device->coredev, device, &init_net);
58355aeed06SJason Gunthorpe 
58455aeed06SJason Gunthorpe 	INIT_LIST_HEAD(&device->event_handler_list);
58555aeed06SJason Gunthorpe 	spin_lock_init(&device->event_handler_lock);
586d0899892SJason Gunthorpe 	mutex_init(&device->unregistration_lock);
5870df91bb6SJason Gunthorpe 	/*
5880df91bb6SJason Gunthorpe 	 * client_data needs to be alloc because we don't want our mark to be
5890df91bb6SJason Gunthorpe 	 * destroyed if the user stores NULL in the client data.
5900df91bb6SJason Gunthorpe 	 */
5910df91bb6SJason Gunthorpe 	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
592921eab11SJason Gunthorpe 	init_rwsem(&device->client_data_rwsem);
5934e0f7b90SParav Pandit 	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
5944e0f7b90SParav Pandit 	mutex_init(&device->compat_devs_mutex);
59501b67117SParav Pandit 	init_completion(&device->unreg_completion);
596d0899892SJason Gunthorpe 	INIT_WORK(&device->unregistration_work, ib_unregister_work);
59755aeed06SJason Gunthorpe 
59855aeed06SJason Gunthorpe 	return device;
5991da177e4SLinus Torvalds }
600459cc69fSLeon Romanovsky EXPORT_SYMBOL(_ib_alloc_device);
6011da177e4SLinus Torvalds 
6021da177e4SLinus Torvalds /**
6031da177e4SLinus Torvalds  * ib_dealloc_device - free an IB device struct
6041da177e4SLinus Torvalds  * @device:structure to free
6051da177e4SLinus Torvalds  *
6061da177e4SLinus Torvalds  * Free a structure allocated with ib_alloc_device().
6071da177e4SLinus Torvalds  */
6081da177e4SLinus Torvalds void ib_dealloc_device(struct ib_device *device)
6091da177e4SLinus Torvalds {
610d0899892SJason Gunthorpe 	if (device->ops.dealloc_driver)
611d0899892SJason Gunthorpe 		device->ops.dealloc_driver(device);
612d0899892SJason Gunthorpe 
613d0899892SJason Gunthorpe 	/*
614d0899892SJason Gunthorpe 	 * ib_unregister_driver() requires all devices to remain in the xarray
615d0899892SJason Gunthorpe 	 * while their ops are callable. The last op we call is dealloc_driver
616d0899892SJason Gunthorpe 	 * above.  This is needed to create a fence on op callbacks prior to
617d0899892SJason Gunthorpe 	 * allowing the driver module to unload.
618d0899892SJason Gunthorpe 	 */
619d0899892SJason Gunthorpe 	down_write(&devices_rwsem);
620d0899892SJason Gunthorpe 	if (xa_load(&devices, device->index) == device)
621d0899892SJason Gunthorpe 		xa_erase(&devices, device->index);
622d0899892SJason Gunthorpe 	up_write(&devices_rwsem);
623d0899892SJason Gunthorpe 
624c2261dd7SJason Gunthorpe 	/* Expedite releasing netdev references */
625c2261dd7SJason Gunthorpe 	free_netdevs(device);
626c2261dd7SJason Gunthorpe 
6274e0f7b90SParav Pandit 	WARN_ON(!xa_empty(&device->compat_devs));
6280df91bb6SJason Gunthorpe 	WARN_ON(!xa_empty(&device->client_data));
629652432f3SJason Gunthorpe 	WARN_ON(refcount_read(&device->refcount));
6300ad699c0SLeon Romanovsky 	rdma_restrack_clean(device);
631e155755eSParav Pandit 	/* Balances with device_initialize */
632924b8900SLeon Romanovsky 	put_device(&device->dev);
6331da177e4SLinus Torvalds }
6341da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dealloc_device);
6351da177e4SLinus Torvalds 
636921eab11SJason Gunthorpe /*
637921eab11SJason Gunthorpe  * add_client_context() and remove_client_context() must be safe against
638921eab11SJason Gunthorpe  * parallel calls on the same device - registration/unregistration of both the
639921eab11SJason Gunthorpe  * device and client can be occurring in parallel.
640921eab11SJason Gunthorpe  *
641921eab11SJason Gunthorpe  * The routines need to be a fence, any caller must not return until the add
642921eab11SJason Gunthorpe  * or remove is fully completed.
643921eab11SJason Gunthorpe  */
644921eab11SJason Gunthorpe static int add_client_context(struct ib_device *device,
645921eab11SJason Gunthorpe 			      struct ib_client *client)
6461da177e4SLinus Torvalds {
647921eab11SJason Gunthorpe 	int ret = 0;
6481da177e4SLinus Torvalds 
6496780c4faSGal Pressman 	if (!device->kverbs_provider && !client->no_kverbs_req)
650921eab11SJason Gunthorpe 		return 0;
6516780c4faSGal Pressman 
652921eab11SJason Gunthorpe 	down_write(&device->client_data_rwsem);
653921eab11SJason Gunthorpe 	/*
654921eab11SJason Gunthorpe 	 * Another caller to add_client_context got here first and has already
655921eab11SJason Gunthorpe 	 * completely initialized context.
656921eab11SJason Gunthorpe 	 */
657921eab11SJason Gunthorpe 	if (xa_get_mark(&device->client_data, client->client_id,
658921eab11SJason Gunthorpe 		    CLIENT_DATA_REGISTERED))
659921eab11SJason Gunthorpe 		goto out;
660921eab11SJason Gunthorpe 
661921eab11SJason Gunthorpe 	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
662921eab11SJason Gunthorpe 			      GFP_KERNEL));
663921eab11SJason Gunthorpe 	if (ret)
664921eab11SJason Gunthorpe 		goto out;
665921eab11SJason Gunthorpe 	downgrade_write(&device->client_data_rwsem);
666921eab11SJason Gunthorpe 	if (client->add)
667921eab11SJason Gunthorpe 		client->add(device);
668921eab11SJason Gunthorpe 
669921eab11SJason Gunthorpe 	/* Readers shall not see a client until add has been completed */
6700df91bb6SJason Gunthorpe 	xa_set_mark(&device->client_data, client->client_id,
6710df91bb6SJason Gunthorpe 		    CLIENT_DATA_REGISTERED);
672921eab11SJason Gunthorpe 	up_read(&device->client_data_rwsem);
673921eab11SJason Gunthorpe 	return 0;
6741da177e4SLinus Torvalds 
675921eab11SJason Gunthorpe out:
676921eab11SJason Gunthorpe 	up_write(&device->client_data_rwsem);
677921eab11SJason Gunthorpe 	return ret;
678921eab11SJason Gunthorpe }
679921eab11SJason Gunthorpe 
680921eab11SJason Gunthorpe static void remove_client_context(struct ib_device *device,
681921eab11SJason Gunthorpe 				  unsigned int client_id)
682921eab11SJason Gunthorpe {
683921eab11SJason Gunthorpe 	struct ib_client *client;
684921eab11SJason Gunthorpe 	void *client_data;
685921eab11SJason Gunthorpe 
686921eab11SJason Gunthorpe 	down_write(&device->client_data_rwsem);
687921eab11SJason Gunthorpe 	if (!xa_get_mark(&device->client_data, client_id,
688921eab11SJason Gunthorpe 			 CLIENT_DATA_REGISTERED)) {
689921eab11SJason Gunthorpe 		up_write(&device->client_data_rwsem);
690921eab11SJason Gunthorpe 		return;
691921eab11SJason Gunthorpe 	}
692921eab11SJason Gunthorpe 	client_data = xa_load(&device->client_data, client_id);
693921eab11SJason Gunthorpe 	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
694921eab11SJason Gunthorpe 	client = xa_load(&clients, client_id);
695921eab11SJason Gunthorpe 	downgrade_write(&device->client_data_rwsem);
696921eab11SJason Gunthorpe 
697921eab11SJason Gunthorpe 	/*
698921eab11SJason Gunthorpe 	 * Notice we cannot be holding any exclusive locks when calling the
699921eab11SJason Gunthorpe 	 * remove callback as the remove callback can recurse back into any
700921eab11SJason Gunthorpe 	 * public functions in this module and thus try for any locks those
701921eab11SJason Gunthorpe 	 * functions take.
702921eab11SJason Gunthorpe 	 *
703921eab11SJason Gunthorpe 	 * For this reason clients and drivers should not call the
704921eab11SJason Gunthorpe 	 * unregistration functions will holdling any locks.
705921eab11SJason Gunthorpe 	 *
706921eab11SJason Gunthorpe 	 * It tempting to drop the client_data_rwsem too, but this is required
707921eab11SJason Gunthorpe 	 * to ensure that unregister_client does not return until all clients
708921eab11SJason Gunthorpe 	 * are completely unregistered, which is required to avoid module
709921eab11SJason Gunthorpe 	 * unloading races.
710921eab11SJason Gunthorpe 	 */
711921eab11SJason Gunthorpe 	if (client->remove)
712921eab11SJason Gunthorpe 		client->remove(device, client_data);
713921eab11SJason Gunthorpe 
714921eab11SJason Gunthorpe 	xa_erase(&device->client_data, client_id);
715921eab11SJason Gunthorpe 	up_read(&device->client_data_rwsem);
7161da177e4SLinus Torvalds }
7171da177e4SLinus Torvalds 
718c2261dd7SJason Gunthorpe static int alloc_port_data(struct ib_device *device)
7195eb620c8SYosef Etigin {
720324e227eSJason Gunthorpe 	struct ib_port_data_rcu *pdata_rcu;
721ea1075edSJason Gunthorpe 	unsigned int port;
722c2261dd7SJason Gunthorpe 
723c2261dd7SJason Gunthorpe 	if (device->port_data)
724c2261dd7SJason Gunthorpe 		return 0;
725c2261dd7SJason Gunthorpe 
726c2261dd7SJason Gunthorpe 	/* This can only be called once the physical port range is defined */
727c2261dd7SJason Gunthorpe 	if (WARN_ON(!device->phys_port_cnt))
728c2261dd7SJason Gunthorpe 		return -EINVAL;
7295eb620c8SYosef Etigin 
7308ceb1357SJason Gunthorpe 	/*
7318ceb1357SJason Gunthorpe 	 * device->port_data is indexed directly by the port number to make
7327738613eSIra Weiny 	 * access to this data as efficient as possible.
7337738613eSIra Weiny 	 *
7348ceb1357SJason Gunthorpe 	 * Therefore port_data is declared as a 1 based array with potential
7358ceb1357SJason Gunthorpe 	 * empty slots at the beginning.
7367738613eSIra Weiny 	 */
737324e227eSJason Gunthorpe 	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
738324e227eSJason Gunthorpe 					rdma_end_port(device) + 1),
739324e227eSJason Gunthorpe 			    GFP_KERNEL);
740324e227eSJason Gunthorpe 	if (!pdata_rcu)
74155aeed06SJason Gunthorpe 		return -ENOMEM;
742324e227eSJason Gunthorpe 	/*
743324e227eSJason Gunthorpe 	 * The rcu_head is put in front of the port data array and the stored
744324e227eSJason Gunthorpe 	 * pointer is adjusted since we never need to see that member until
745324e227eSJason Gunthorpe 	 * kfree_rcu.
746324e227eSJason Gunthorpe 	 */
747324e227eSJason Gunthorpe 	device->port_data = pdata_rcu->pdata;
7485eb620c8SYosef Etigin 
749ea1075edSJason Gunthorpe 	rdma_for_each_port (device, port) {
7508ceb1357SJason Gunthorpe 		struct ib_port_data *pdata = &device->port_data[port];
7518ceb1357SJason Gunthorpe 
752324e227eSJason Gunthorpe 		pdata->ib_dev = device;
7538ceb1357SJason Gunthorpe 		spin_lock_init(&pdata->pkey_list_lock);
7548ceb1357SJason Gunthorpe 		INIT_LIST_HEAD(&pdata->pkey_list);
755c2261dd7SJason Gunthorpe 		spin_lock_init(&pdata->netdev_lock);
756324e227eSJason Gunthorpe 		INIT_HLIST_NODE(&pdata->ndev_hash_link);
757c2261dd7SJason Gunthorpe 	}
758c2261dd7SJason Gunthorpe 	return 0;
759c2261dd7SJason Gunthorpe }
760c2261dd7SJason Gunthorpe 
761c2261dd7SJason Gunthorpe static int verify_immutable(const struct ib_device *dev, u8 port)
762c2261dd7SJason Gunthorpe {
763c2261dd7SJason Gunthorpe 	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
764c2261dd7SJason Gunthorpe 			    rdma_max_mad_size(dev, port) != 0);
765c2261dd7SJason Gunthorpe }
766c2261dd7SJason Gunthorpe 
767c2261dd7SJason Gunthorpe static int setup_port_data(struct ib_device *device)
768c2261dd7SJason Gunthorpe {
769c2261dd7SJason Gunthorpe 	unsigned int port;
770c2261dd7SJason Gunthorpe 	int ret;
771c2261dd7SJason Gunthorpe 
772c2261dd7SJason Gunthorpe 	ret = alloc_port_data(device);
773c2261dd7SJason Gunthorpe 	if (ret)
774c2261dd7SJason Gunthorpe 		return ret;
775c2261dd7SJason Gunthorpe 
776c2261dd7SJason Gunthorpe 	rdma_for_each_port (device, port) {
777c2261dd7SJason Gunthorpe 		struct ib_port_data *pdata = &device->port_data[port];
7788ceb1357SJason Gunthorpe 
7798ceb1357SJason Gunthorpe 		ret = device->ops.get_port_immutable(device, port,
7808ceb1357SJason Gunthorpe 						     &pdata->immutable);
7815eb620c8SYosef Etigin 		if (ret)
7825eb620c8SYosef Etigin 			return ret;
78355aeed06SJason Gunthorpe 
78455aeed06SJason Gunthorpe 		if (verify_immutable(device, port))
78555aeed06SJason Gunthorpe 			return -EINVAL;
78655aeed06SJason Gunthorpe 	}
78755aeed06SJason Gunthorpe 	return 0;
7885eb620c8SYosef Etigin }
7895eb620c8SYosef Etigin 
7909abb0d1bSLeon Romanovsky void ib_get_device_fw_str(struct ib_device *dev, char *str)
7915fa76c20SIra Weiny {
7923023a1e9SKamal Heib 	if (dev->ops.get_dev_fw_str)
7933023a1e9SKamal Heib 		dev->ops.get_dev_fw_str(dev, str);
7945fa76c20SIra Weiny 	else
7955fa76c20SIra Weiny 		str[0] = '\0';
7965fa76c20SIra Weiny }
7975fa76c20SIra Weiny EXPORT_SYMBOL(ib_get_device_fw_str);
7985fa76c20SIra Weiny 
7998f408ab6SDaniel Jurgens static void ib_policy_change_task(struct work_struct *work)
8008f408ab6SDaniel Jurgens {
8018f408ab6SDaniel Jurgens 	struct ib_device *dev;
8020df91bb6SJason Gunthorpe 	unsigned long index;
8038f408ab6SDaniel Jurgens 
804921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
8050df91bb6SJason Gunthorpe 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
806ea1075edSJason Gunthorpe 		unsigned int i;
8078f408ab6SDaniel Jurgens 
808ea1075edSJason Gunthorpe 		rdma_for_each_port (dev, i) {
8098f408ab6SDaniel Jurgens 			u64 sp;
8108f408ab6SDaniel Jurgens 			int ret = ib_get_cached_subnet_prefix(dev,
8118f408ab6SDaniel Jurgens 							      i,
8128f408ab6SDaniel Jurgens 							      &sp);
8138f408ab6SDaniel Jurgens 
8148f408ab6SDaniel Jurgens 			WARN_ONCE(ret,
8158f408ab6SDaniel Jurgens 				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
8168f408ab6SDaniel Jurgens 				  ret);
817a750cfdeSDaniel Jurgens 			if (!ret)
8188f408ab6SDaniel Jurgens 				ib_security_cache_change(dev, i, sp);
8198f408ab6SDaniel Jurgens 		}
8208f408ab6SDaniel Jurgens 	}
821921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
8228f408ab6SDaniel Jurgens }
8238f408ab6SDaniel Jurgens 
8248f408ab6SDaniel Jurgens static int ib_security_change(struct notifier_block *nb, unsigned long event,
8258f408ab6SDaniel Jurgens 			      void *lsm_data)
8268f408ab6SDaniel Jurgens {
8278f408ab6SDaniel Jurgens 	if (event != LSM_POLICY_CHANGE)
8288f408ab6SDaniel Jurgens 		return NOTIFY_DONE;
8298f408ab6SDaniel Jurgens 
8308f408ab6SDaniel Jurgens 	schedule_work(&ib_policy_change_work);
831c66f6741SDaniel Jurgens 	ib_mad_agent_security_change();
8328f408ab6SDaniel Jurgens 
8338f408ab6SDaniel Jurgens 	return NOTIFY_OK;
8348f408ab6SDaniel Jurgens }
8358f408ab6SDaniel Jurgens 
8364e0f7b90SParav Pandit static void compatdev_release(struct device *dev)
8374e0f7b90SParav Pandit {
8384e0f7b90SParav Pandit 	struct ib_core_device *cdev =
8394e0f7b90SParav Pandit 		container_of(dev, struct ib_core_device, dev);
8404e0f7b90SParav Pandit 
8414e0f7b90SParav Pandit 	kfree(cdev);
8424e0f7b90SParav Pandit }
8434e0f7b90SParav Pandit 
8444e0f7b90SParav Pandit static int add_one_compat_dev(struct ib_device *device,
8454e0f7b90SParav Pandit 			      struct rdma_dev_net *rnet)
8464e0f7b90SParav Pandit {
8474e0f7b90SParav Pandit 	struct ib_core_device *cdev;
8484e0f7b90SParav Pandit 	int ret;
8494e0f7b90SParav Pandit 
8502b34c558SParav Pandit 	lockdep_assert_held(&rdma_nets_rwsem);
851a56bc45bSParav Pandit 	if (!ib_devices_shared_netns)
852a56bc45bSParav Pandit 		return 0;
853a56bc45bSParav Pandit 
8544e0f7b90SParav Pandit 	/*
8554e0f7b90SParav Pandit 	 * Create and add compat device in all namespaces other than where it
8564e0f7b90SParav Pandit 	 * is currently bound to.
8574e0f7b90SParav Pandit 	 */
8584e0f7b90SParav Pandit 	if (net_eq(read_pnet(&rnet->net),
8594e0f7b90SParav Pandit 		   read_pnet(&device->coredev.rdma_net)))
8604e0f7b90SParav Pandit 		return 0;
8614e0f7b90SParav Pandit 
8624e0f7b90SParav Pandit 	/*
8634e0f7b90SParav Pandit 	 * The first of init_net() or ib_register_device() to take the
8644e0f7b90SParav Pandit 	 * compat_devs_mutex wins and gets to add the device. Others will wait
8654e0f7b90SParav Pandit 	 * for completion here.
8664e0f7b90SParav Pandit 	 */
8674e0f7b90SParav Pandit 	mutex_lock(&device->compat_devs_mutex);
8684e0f7b90SParav Pandit 	cdev = xa_load(&device->compat_devs, rnet->id);
8694e0f7b90SParav Pandit 	if (cdev) {
8704e0f7b90SParav Pandit 		ret = 0;
8714e0f7b90SParav Pandit 		goto done;
8724e0f7b90SParav Pandit 	}
8734e0f7b90SParav Pandit 	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
8744e0f7b90SParav Pandit 	if (ret)
8754e0f7b90SParav Pandit 		goto done;
8764e0f7b90SParav Pandit 
8774e0f7b90SParav Pandit 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
8784e0f7b90SParav Pandit 	if (!cdev) {
8794e0f7b90SParav Pandit 		ret = -ENOMEM;
8804e0f7b90SParav Pandit 		goto cdev_err;
8814e0f7b90SParav Pandit 	}
8824e0f7b90SParav Pandit 
8834e0f7b90SParav Pandit 	cdev->dev.parent = device->dev.parent;
8844e0f7b90SParav Pandit 	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
8854e0f7b90SParav Pandit 	cdev->dev.release = compatdev_release;
8864e0f7b90SParav Pandit 	dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
8874e0f7b90SParav Pandit 
8884e0f7b90SParav Pandit 	ret = device_add(&cdev->dev);
8894e0f7b90SParav Pandit 	if (ret)
8904e0f7b90SParav Pandit 		goto add_err;
891eb15c78bSParav Pandit 	ret = ib_setup_port_attrs(cdev);
8925417783eSParav Pandit 	if (ret)
8935417783eSParav Pandit 		goto port_err;
8944e0f7b90SParav Pandit 
8954e0f7b90SParav Pandit 	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
8964e0f7b90SParav Pandit 			      cdev, GFP_KERNEL));
8974e0f7b90SParav Pandit 	if (ret)
8984e0f7b90SParav Pandit 		goto insert_err;
8994e0f7b90SParav Pandit 
9004e0f7b90SParav Pandit 	mutex_unlock(&device->compat_devs_mutex);
9014e0f7b90SParav Pandit 	return 0;
9024e0f7b90SParav Pandit 
9034e0f7b90SParav Pandit insert_err:
9045417783eSParav Pandit 	ib_free_port_attrs(cdev);
9055417783eSParav Pandit port_err:
9064e0f7b90SParav Pandit 	device_del(&cdev->dev);
9074e0f7b90SParav Pandit add_err:
9084e0f7b90SParav Pandit 	put_device(&cdev->dev);
9094e0f7b90SParav Pandit cdev_err:
9104e0f7b90SParav Pandit 	xa_release(&device->compat_devs, rnet->id);
9114e0f7b90SParav Pandit done:
9124e0f7b90SParav Pandit 	mutex_unlock(&device->compat_devs_mutex);
9134e0f7b90SParav Pandit 	return ret;
9144e0f7b90SParav Pandit }
9154e0f7b90SParav Pandit 
9164e0f7b90SParav Pandit static void remove_one_compat_dev(struct ib_device *device, u32 id)
9174e0f7b90SParav Pandit {
9184e0f7b90SParav Pandit 	struct ib_core_device *cdev;
9194e0f7b90SParav Pandit 
9204e0f7b90SParav Pandit 	mutex_lock(&device->compat_devs_mutex);
9214e0f7b90SParav Pandit 	cdev = xa_erase(&device->compat_devs, id);
9224e0f7b90SParav Pandit 	mutex_unlock(&device->compat_devs_mutex);
9234e0f7b90SParav Pandit 	if (cdev) {
9245417783eSParav Pandit 		ib_free_port_attrs(cdev);
9254e0f7b90SParav Pandit 		device_del(&cdev->dev);
9264e0f7b90SParav Pandit 		put_device(&cdev->dev);
9274e0f7b90SParav Pandit 	}
9284e0f7b90SParav Pandit }
9294e0f7b90SParav Pandit 
9304e0f7b90SParav Pandit static void remove_compat_devs(struct ib_device *device)
9314e0f7b90SParav Pandit {
9324e0f7b90SParav Pandit 	struct ib_core_device *cdev;
9334e0f7b90SParav Pandit 	unsigned long index;
9344e0f7b90SParav Pandit 
9354e0f7b90SParav Pandit 	xa_for_each (&device->compat_devs, index, cdev)
9364e0f7b90SParav Pandit 		remove_one_compat_dev(device, index);
9374e0f7b90SParav Pandit }
9384e0f7b90SParav Pandit 
9394e0f7b90SParav Pandit static int add_compat_devs(struct ib_device *device)
9404e0f7b90SParav Pandit {
9414e0f7b90SParav Pandit 	struct rdma_dev_net *rnet;
9424e0f7b90SParav Pandit 	unsigned long index;
9434e0f7b90SParav Pandit 	int ret = 0;
9444e0f7b90SParav Pandit 
945decbc7a6SParav Pandit 	lockdep_assert_held(&devices_rwsem);
946decbc7a6SParav Pandit 
9474e0f7b90SParav Pandit 	down_read(&rdma_nets_rwsem);
9484e0f7b90SParav Pandit 	xa_for_each (&rdma_nets, index, rnet) {
9494e0f7b90SParav Pandit 		ret = add_one_compat_dev(device, rnet);
9504e0f7b90SParav Pandit 		if (ret)
9514e0f7b90SParav Pandit 			break;
9524e0f7b90SParav Pandit 	}
9534e0f7b90SParav Pandit 	up_read(&rdma_nets_rwsem);
9544e0f7b90SParav Pandit 	return ret;
9554e0f7b90SParav Pandit }
9564e0f7b90SParav Pandit 
9572b34c558SParav Pandit static void remove_all_compat_devs(void)
9582b34c558SParav Pandit {
9592b34c558SParav Pandit 	struct ib_compat_device *cdev;
9602b34c558SParav Pandit 	struct ib_device *dev;
9612b34c558SParav Pandit 	unsigned long index;
9622b34c558SParav Pandit 
9632b34c558SParav Pandit 	down_read(&devices_rwsem);
9642b34c558SParav Pandit 	xa_for_each (&devices, index, dev) {
9652b34c558SParav Pandit 		unsigned long c_index = 0;
9662b34c558SParav Pandit 
9672b34c558SParav Pandit 		/* Hold nets_rwsem so that any other thread modifying this
9682b34c558SParav Pandit 		 * system param can sync with this thread.
9692b34c558SParav Pandit 		 */
9702b34c558SParav Pandit 		down_read(&rdma_nets_rwsem);
9712b34c558SParav Pandit 		xa_for_each (&dev->compat_devs, c_index, cdev)
9722b34c558SParav Pandit 			remove_one_compat_dev(dev, c_index);
9732b34c558SParav Pandit 		up_read(&rdma_nets_rwsem);
9742b34c558SParav Pandit 	}
9752b34c558SParav Pandit 	up_read(&devices_rwsem);
9762b34c558SParav Pandit }
9772b34c558SParav Pandit 
9782b34c558SParav Pandit static int add_all_compat_devs(void)
9792b34c558SParav Pandit {
9802b34c558SParav Pandit 	struct rdma_dev_net *rnet;
9812b34c558SParav Pandit 	struct ib_device *dev;
9822b34c558SParav Pandit 	unsigned long index;
9832b34c558SParav Pandit 	int ret = 0;
9842b34c558SParav Pandit 
9852b34c558SParav Pandit 	down_read(&devices_rwsem);
9862b34c558SParav Pandit 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
9872b34c558SParav Pandit 		unsigned long net_index = 0;
9882b34c558SParav Pandit 
9892b34c558SParav Pandit 		/* Hold nets_rwsem so that any other thread modifying this
9902b34c558SParav Pandit 		 * system param can sync with this thread.
9912b34c558SParav Pandit 		 */
9922b34c558SParav Pandit 		down_read(&rdma_nets_rwsem);
9932b34c558SParav Pandit 		xa_for_each (&rdma_nets, net_index, rnet) {
9942b34c558SParav Pandit 			ret = add_one_compat_dev(dev, rnet);
9952b34c558SParav Pandit 			if (ret)
9962b34c558SParav Pandit 				break;
9972b34c558SParav Pandit 		}
9982b34c558SParav Pandit 		up_read(&rdma_nets_rwsem);
9992b34c558SParav Pandit 	}
10002b34c558SParav Pandit 	up_read(&devices_rwsem);
10012b34c558SParav Pandit 	if (ret)
10022b34c558SParav Pandit 		remove_all_compat_devs();
10032b34c558SParav Pandit 	return ret;
10042b34c558SParav Pandit }
10052b34c558SParav Pandit 
10062b34c558SParav Pandit int rdma_compatdev_set(u8 enable)
10072b34c558SParav Pandit {
10082b34c558SParav Pandit 	struct rdma_dev_net *rnet;
10092b34c558SParav Pandit 	unsigned long index;
10102b34c558SParav Pandit 	int ret = 0;
10112b34c558SParav Pandit 
10122b34c558SParav Pandit 	down_write(&rdma_nets_rwsem);
10132b34c558SParav Pandit 	if (ib_devices_shared_netns == enable) {
10142b34c558SParav Pandit 		up_write(&rdma_nets_rwsem);
10152b34c558SParav Pandit 		return 0;
10162b34c558SParav Pandit 	}
10172b34c558SParav Pandit 
10182b34c558SParav Pandit 	/* enable/disable of compat devices is not supported
10192b34c558SParav Pandit 	 * when more than default init_net exists.
10202b34c558SParav Pandit 	 */
10212b34c558SParav Pandit 	xa_for_each (&rdma_nets, index, rnet) {
10222b34c558SParav Pandit 		ret++;
10232b34c558SParav Pandit 		break;
10242b34c558SParav Pandit 	}
10252b34c558SParav Pandit 	if (!ret)
10262b34c558SParav Pandit 		ib_devices_shared_netns = enable;
10272b34c558SParav Pandit 	up_write(&rdma_nets_rwsem);
10282b34c558SParav Pandit 	if (ret)
10292b34c558SParav Pandit 		return -EBUSY;
10302b34c558SParav Pandit 
10312b34c558SParav Pandit 	if (enable)
10322b34c558SParav Pandit 		ret = add_all_compat_devs();
10332b34c558SParav Pandit 	else
10342b34c558SParav Pandit 		remove_all_compat_devs();
10352b34c558SParav Pandit 	return ret;
10362b34c558SParav Pandit }
10372b34c558SParav Pandit 
10384e0f7b90SParav Pandit static void rdma_dev_exit_net(struct net *net)
10394e0f7b90SParav Pandit {
10404e0f7b90SParav Pandit 	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
10414e0f7b90SParav Pandit 	struct ib_device *dev;
10424e0f7b90SParav Pandit 	unsigned long index;
10434e0f7b90SParav Pandit 	int ret;
10444e0f7b90SParav Pandit 
10454e0f7b90SParav Pandit 	down_write(&rdma_nets_rwsem);
10464e0f7b90SParav Pandit 	/*
10474e0f7b90SParav Pandit 	 * Prevent the ID from being re-used and hide the id from xa_for_each.
10484e0f7b90SParav Pandit 	 */
10494e0f7b90SParav Pandit 	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
10504e0f7b90SParav Pandit 	WARN_ON(ret);
10514e0f7b90SParav Pandit 	up_write(&rdma_nets_rwsem);
10524e0f7b90SParav Pandit 
10534e0f7b90SParav Pandit 	down_read(&devices_rwsem);
10544e0f7b90SParav Pandit 	xa_for_each (&devices, index, dev) {
10554e0f7b90SParav Pandit 		get_device(&dev->dev);
10564e0f7b90SParav Pandit 		/*
10574e0f7b90SParav Pandit 		 * Release the devices_rwsem so that pontentially blocking
10584e0f7b90SParav Pandit 		 * device_del, doesn't hold the devices_rwsem for too long.
10594e0f7b90SParav Pandit 		 */
10604e0f7b90SParav Pandit 		up_read(&devices_rwsem);
10614e0f7b90SParav Pandit 
10624e0f7b90SParav Pandit 		remove_one_compat_dev(dev, rnet->id);
10634e0f7b90SParav Pandit 
1064decbc7a6SParav Pandit 		/*
1065decbc7a6SParav Pandit 		 * If the real device is in the NS then move it back to init.
1066decbc7a6SParav Pandit 		 */
1067decbc7a6SParav Pandit 		rdma_dev_change_netns(dev, net, &init_net);
1068decbc7a6SParav Pandit 
10694e0f7b90SParav Pandit 		put_device(&dev->dev);
10704e0f7b90SParav Pandit 		down_read(&devices_rwsem);
10714e0f7b90SParav Pandit 	}
10724e0f7b90SParav Pandit 	up_read(&devices_rwsem);
10734e0f7b90SParav Pandit 
10744e0f7b90SParav Pandit 	xa_erase(&rdma_nets, rnet->id);
10754e0f7b90SParav Pandit }
10764e0f7b90SParav Pandit 
10774e0f7b90SParav Pandit static __net_init int rdma_dev_init_net(struct net *net)
10784e0f7b90SParav Pandit {
10794e0f7b90SParav Pandit 	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
10804e0f7b90SParav Pandit 	unsigned long index;
10814e0f7b90SParav Pandit 	struct ib_device *dev;
10824e0f7b90SParav Pandit 	int ret;
10834e0f7b90SParav Pandit 
10844e0f7b90SParav Pandit 	/* No need to create any compat devices in default init_net. */
10854e0f7b90SParav Pandit 	if (net_eq(net, &init_net))
10864e0f7b90SParav Pandit 		return 0;
10874e0f7b90SParav Pandit 
10884e0f7b90SParav Pandit 	write_pnet(&rnet->net, net);
10894e0f7b90SParav Pandit 
10904e0f7b90SParav Pandit 	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
10914e0f7b90SParav Pandit 	if (ret)
10924e0f7b90SParav Pandit 		return ret;
10934e0f7b90SParav Pandit 
10944e0f7b90SParav Pandit 	down_read(&devices_rwsem);
10954e0f7b90SParav Pandit 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
10962b34c558SParav Pandit 		/* Hold nets_rwsem so that netlink command cannot change
10972b34c558SParav Pandit 		 * system configuration for device sharing mode.
10982b34c558SParav Pandit 		 */
10992b34c558SParav Pandit 		down_read(&rdma_nets_rwsem);
11004e0f7b90SParav Pandit 		ret = add_one_compat_dev(dev, rnet);
11012b34c558SParav Pandit 		up_read(&rdma_nets_rwsem);
11024e0f7b90SParav Pandit 		if (ret)
11034e0f7b90SParav Pandit 			break;
11044e0f7b90SParav Pandit 	}
11054e0f7b90SParav Pandit 	up_read(&devices_rwsem);
11064e0f7b90SParav Pandit 
11074e0f7b90SParav Pandit 	if (ret)
11084e0f7b90SParav Pandit 		rdma_dev_exit_net(net);
11094e0f7b90SParav Pandit 
11104e0f7b90SParav Pandit 	return ret;
11114e0f7b90SParav Pandit }
11124e0f7b90SParav Pandit 
1113ecc82c53SLeon Romanovsky /*
1114d0899892SJason Gunthorpe  * Assign the unique string device name and the unique device index. This is
1115d0899892SJason Gunthorpe  * undone by ib_dealloc_device.
1116ecc82c53SLeon Romanovsky  */
11170df91bb6SJason Gunthorpe static int assign_name(struct ib_device *device, const char *name)
11180df91bb6SJason Gunthorpe {
11190df91bb6SJason Gunthorpe 	static u32 last_id;
11200df91bb6SJason Gunthorpe 	int ret;
1121ecc82c53SLeon Romanovsky 
1122921eab11SJason Gunthorpe 	down_write(&devices_rwsem);
11230df91bb6SJason Gunthorpe 	/* Assign a unique name to the device */
11240df91bb6SJason Gunthorpe 	if (strchr(name, '%'))
11250df91bb6SJason Gunthorpe 		ret = alloc_name(device, name);
11260df91bb6SJason Gunthorpe 	else
11270df91bb6SJason Gunthorpe 		ret = dev_set_name(&device->dev, name);
11280df91bb6SJason Gunthorpe 	if (ret)
11290df91bb6SJason Gunthorpe 		goto out;
1130ecc82c53SLeon Romanovsky 
11310df91bb6SJason Gunthorpe 	if (__ib_device_get_by_name(dev_name(&device->dev))) {
11320df91bb6SJason Gunthorpe 		ret = -ENFILE;
11330df91bb6SJason Gunthorpe 		goto out;
1134ecc82c53SLeon Romanovsky 	}
11350df91bb6SJason Gunthorpe 	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
11360df91bb6SJason Gunthorpe 
1137ea295481SLinus Torvalds 	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
1138ea295481SLinus Torvalds 			&last_id, GFP_KERNEL);
1139ea295481SLinus Torvalds 	if (ret > 0)
11400df91bb6SJason Gunthorpe 		ret = 0;
1141921eab11SJason Gunthorpe 
11420df91bb6SJason Gunthorpe out:
1143921eab11SJason Gunthorpe 	up_write(&devices_rwsem);
11440df91bb6SJason Gunthorpe 	return ret;
11450df91bb6SJason Gunthorpe }
11460df91bb6SJason Gunthorpe 
1147548cb4fbSParav Pandit static void setup_dma_device(struct ib_device *device)
11481da177e4SLinus Torvalds {
114999db9494SBart Van Assche 	struct device *parent = device->dev.parent;
11501da177e4SLinus Torvalds 
11510957c29fSBart Van Assche 	WARN_ON_ONCE(device->dma_device);
11520957c29fSBart Van Assche 	if (device->dev.dma_ops) {
11530957c29fSBart Van Assche 		/*
11540957c29fSBart Van Assche 		 * The caller provided custom DMA operations. Copy the
11550957c29fSBart Van Assche 		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
11560957c29fSBart Van Assche 		 * into device->dev.
11570957c29fSBart Van Assche 		 */
11580957c29fSBart Van Assche 		device->dma_device = &device->dev;
115902ee9da3SBart Van Assche 		if (!device->dev.dma_mask) {
116002ee9da3SBart Van Assche 			if (parent)
116199db9494SBart Van Assche 				device->dev.dma_mask = parent->dma_mask;
116202ee9da3SBart Van Assche 			else
116302ee9da3SBart Van Assche 				WARN_ON_ONCE(true);
116402ee9da3SBart Van Assche 		}
116502ee9da3SBart Van Assche 		if (!device->dev.coherent_dma_mask) {
116602ee9da3SBart Van Assche 			if (parent)
11670957c29fSBart Van Assche 				device->dev.coherent_dma_mask =
11680957c29fSBart Van Assche 					parent->coherent_dma_mask;
116902ee9da3SBart Van Assche 			else
117002ee9da3SBart Van Assche 				WARN_ON_ONCE(true);
117102ee9da3SBart Van Assche 		}
11720957c29fSBart Van Assche 	} else {
11730957c29fSBart Van Assche 		/*
11740957c29fSBart Van Assche 		 * The caller did not provide custom DMA operations. Use the
11750957c29fSBart Van Assche 		 * DMA mapping operations of the parent device.
11760957c29fSBart Van Assche 		 */
117702ee9da3SBart Van Assche 		WARN_ON_ONCE(!parent);
11780957c29fSBart Van Assche 		device->dma_device = parent;
11790957c29fSBart Van Assche 	}
1180d10bcf94SShiraz Saleem 	/* Setup default max segment size for all IB devices */
1181d10bcf94SShiraz Saleem 	dma_set_max_seg_size(device->dma_device, SZ_2G);
1182d10bcf94SShiraz Saleem 
1183548cb4fbSParav Pandit }
1184548cb4fbSParav Pandit 
1185921eab11SJason Gunthorpe /*
1186921eab11SJason Gunthorpe  * setup_device() allocates memory and sets up data that requires calling the
1187921eab11SJason Gunthorpe  * device ops, this is the only reason these actions are not done during
1188921eab11SJason Gunthorpe  * ib_alloc_device. It is undone by ib_dealloc_device().
1189921eab11SJason Gunthorpe  */
1190548cb4fbSParav Pandit static int setup_device(struct ib_device *device)
1191548cb4fbSParav Pandit {
1192548cb4fbSParav Pandit 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
1193548cb4fbSParav Pandit 	int ret;
1194548cb4fbSParav Pandit 
1195921eab11SJason Gunthorpe 	setup_dma_device(device);
1196921eab11SJason Gunthorpe 
1197548cb4fbSParav Pandit 	ret = ib_device_check_mandatory(device);
1198548cb4fbSParav Pandit 	if (ret)
1199548cb4fbSParav Pandit 		return ret;
1200548cb4fbSParav Pandit 
12018ceb1357SJason Gunthorpe 	ret = setup_port_data(device);
1202548cb4fbSParav Pandit 	if (ret) {
12038ceb1357SJason Gunthorpe 		dev_warn(&device->dev, "Couldn't create per-port data\n");
1204548cb4fbSParav Pandit 		return ret;
1205548cb4fbSParav Pandit 	}
1206548cb4fbSParav Pandit 
1207548cb4fbSParav Pandit 	memset(&device->attrs, 0, sizeof(device->attrs));
12083023a1e9SKamal Heib 	ret = device->ops.query_device(device, &device->attrs, &uhw);
1209548cb4fbSParav Pandit 	if (ret) {
1210548cb4fbSParav Pandit 		dev_warn(&device->dev,
1211548cb4fbSParav Pandit 			 "Couldn't query the device attributes\n");
1212d45f89d5SJason Gunthorpe 		return ret;
1213548cb4fbSParav Pandit 	}
1214548cb4fbSParav Pandit 
1215548cb4fbSParav Pandit 	return 0;
1216548cb4fbSParav Pandit }
1217548cb4fbSParav Pandit 
1218921eab11SJason Gunthorpe static void disable_device(struct ib_device *device)
1219921eab11SJason Gunthorpe {
1220921eab11SJason Gunthorpe 	struct ib_client *client;
1221921eab11SJason Gunthorpe 
1222921eab11SJason Gunthorpe 	WARN_ON(!refcount_read(&device->refcount));
1223921eab11SJason Gunthorpe 
1224921eab11SJason Gunthorpe 	down_write(&devices_rwsem);
1225921eab11SJason Gunthorpe 	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1226921eab11SJason Gunthorpe 	up_write(&devices_rwsem);
1227921eab11SJason Gunthorpe 
1228921eab11SJason Gunthorpe 	down_read(&clients_rwsem);
1229921eab11SJason Gunthorpe 	list_for_each_entry_reverse(client, &client_list, list)
1230921eab11SJason Gunthorpe 		remove_client_context(device, client->client_id);
1231921eab11SJason Gunthorpe 	up_read(&clients_rwsem);
1232921eab11SJason Gunthorpe 
1233921eab11SJason Gunthorpe 	/* Pairs with refcount_set in enable_device */
1234921eab11SJason Gunthorpe 	ib_device_put(device);
1235921eab11SJason Gunthorpe 	wait_for_completion(&device->unreg_completion);
1236c2261dd7SJason Gunthorpe 
12374e0f7b90SParav Pandit 	/*
12384e0f7b90SParav Pandit 	 * compat devices must be removed after device refcount drops to zero.
12394e0f7b90SParav Pandit 	 * Otherwise init_net() may add more compatdevs after removing compat
12404e0f7b90SParav Pandit 	 * devices and before device is disabled.
12414e0f7b90SParav Pandit 	 */
12424e0f7b90SParav Pandit 	remove_compat_devs(device);
1243921eab11SJason Gunthorpe }
1244921eab11SJason Gunthorpe 
1245921eab11SJason Gunthorpe /*
1246921eab11SJason Gunthorpe  * An enabled device is visible to all clients and to all the public facing
1247d0899892SJason Gunthorpe  * APIs that return a device pointer. This always returns with a new get, even
1248d0899892SJason Gunthorpe  * if it fails.
1249921eab11SJason Gunthorpe  */
1250d0899892SJason Gunthorpe static int enable_device_and_get(struct ib_device *device)
1251921eab11SJason Gunthorpe {
1252921eab11SJason Gunthorpe 	struct ib_client *client;
1253921eab11SJason Gunthorpe 	unsigned long index;
1254d0899892SJason Gunthorpe 	int ret = 0;
1255921eab11SJason Gunthorpe 
1256d0899892SJason Gunthorpe 	/*
1257d0899892SJason Gunthorpe 	 * One ref belongs to the xa and the other belongs to this
1258d0899892SJason Gunthorpe 	 * thread. This is needed to guard against parallel unregistration.
1259d0899892SJason Gunthorpe 	 */
1260d0899892SJason Gunthorpe 	refcount_set(&device->refcount, 2);
1261921eab11SJason Gunthorpe 	down_write(&devices_rwsem);
1262921eab11SJason Gunthorpe 	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
1263d0899892SJason Gunthorpe 
1264d0899892SJason Gunthorpe 	/*
1265d0899892SJason Gunthorpe 	 * By using downgrade_write() we ensure that no other thread can clear
1266d0899892SJason Gunthorpe 	 * DEVICE_REGISTERED while we are completing the client setup.
1267d0899892SJason Gunthorpe 	 */
1268d0899892SJason Gunthorpe 	downgrade_write(&devices_rwsem);
1269921eab11SJason Gunthorpe 
1270ca22354bSJason Gunthorpe 	if (device->ops.enable_driver) {
1271ca22354bSJason Gunthorpe 		ret = device->ops.enable_driver(device);
1272ca22354bSJason Gunthorpe 		if (ret)
1273ca22354bSJason Gunthorpe 			goto out;
1274ca22354bSJason Gunthorpe 	}
1275ca22354bSJason Gunthorpe 
1276921eab11SJason Gunthorpe 	down_read(&clients_rwsem);
1277921eab11SJason Gunthorpe 	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1278921eab11SJason Gunthorpe 		ret = add_client_context(device, client);
1279d0899892SJason Gunthorpe 		if (ret)
1280d0899892SJason Gunthorpe 			break;
1281d0899892SJason Gunthorpe 	}
1282921eab11SJason Gunthorpe 	up_read(&clients_rwsem);
12834e0f7b90SParav Pandit 	if (!ret)
12844e0f7b90SParav Pandit 		ret = add_compat_devs(device);
1285ca22354bSJason Gunthorpe out:
1286d0899892SJason Gunthorpe 	up_read(&devices_rwsem);
1287921eab11SJason Gunthorpe 	return ret;
1288921eab11SJason Gunthorpe }
1289921eab11SJason Gunthorpe 
1290548cb4fbSParav Pandit /**
1291548cb4fbSParav Pandit  * ib_register_device - Register an IB device with IB core
1292548cb4fbSParav Pandit  * @device:Device to register
1293548cb4fbSParav Pandit  *
1294548cb4fbSParav Pandit  * Low-level drivers use ib_register_device() to register their
1295548cb4fbSParav Pandit  * devices with the IB core.  All registered clients will receive a
1296548cb4fbSParav Pandit  * callback for each device that is added. @device must be allocated
1297548cb4fbSParav Pandit  * with ib_alloc_device().
1298d0899892SJason Gunthorpe  *
1299d0899892SJason Gunthorpe  * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
1300d0899892SJason Gunthorpe  * asynchronously then the device pointer may become freed as soon as this
1301d0899892SJason Gunthorpe  * function returns.
1302548cb4fbSParav Pandit  */
1303ea4baf7fSParav Pandit int ib_register_device(struct ib_device *device, const char *name)
1304548cb4fbSParav Pandit {
1305548cb4fbSParav Pandit 	int ret;
13061da177e4SLinus Torvalds 
13070df91bb6SJason Gunthorpe 	ret = assign_name(device, name);
1308e349f858SJason Gunthorpe 	if (ret)
1309921eab11SJason Gunthorpe 		return ret;
13101da177e4SLinus Torvalds 
1311548cb4fbSParav Pandit 	ret = setup_device(device);
1312548cb4fbSParav Pandit 	if (ret)
1313d0899892SJason Gunthorpe 		return ret;
131403db3a2dSMatan Barak 
1315d45f89d5SJason Gunthorpe 	ret = ib_cache_setup_one(device);
1316d45f89d5SJason Gunthorpe 	if (ret) {
1317d45f89d5SJason Gunthorpe 		dev_warn(&device->dev,
1318d45f89d5SJason Gunthorpe 			 "Couldn't set up InfiniBand P_Key/GID cache\n");
1319d0899892SJason Gunthorpe 		return ret;
1320d45f89d5SJason Gunthorpe 	}
1321d45f89d5SJason Gunthorpe 
13227527a7b1SParav Pandit 	ib_device_register_rdmacg(device);
13233e153a93SIra Weiny 
1324e7a5b4aaSLeon Romanovsky 	/*
1325e7a5b4aaSLeon Romanovsky 	 * Ensure that ADD uevent is not fired because it
1326e7a5b4aaSLeon Romanovsky 	 * is too early amd device is not initialized yet.
1327e7a5b4aaSLeon Romanovsky 	 */
1328e7a5b4aaSLeon Romanovsky 	dev_set_uevent_suppress(&device->dev, true);
13295f8f5499SParav Pandit 	ret = device_add(&device->dev);
13305f8f5499SParav Pandit 	if (ret)
13315f8f5499SParav Pandit 		goto cg_cleanup;
13325f8f5499SParav Pandit 
1333ea4baf7fSParav Pandit 	ret = ib_device_register_sysfs(device);
13341da177e4SLinus Torvalds 	if (ret) {
133543c7c851SJason Gunthorpe 		dev_warn(&device->dev,
133643c7c851SJason Gunthorpe 			 "Couldn't register device with driver model\n");
13375f8f5499SParav Pandit 		goto dev_cleanup;
13381da177e4SLinus Torvalds 	}
13391da177e4SLinus Torvalds 
1340d0899892SJason Gunthorpe 	ret = enable_device_and_get(device);
1341e7a5b4aaSLeon Romanovsky 	dev_set_uevent_suppress(&device->dev, false);
1342e7a5b4aaSLeon Romanovsky 	/* Mark for userspace that device is ready */
1343e7a5b4aaSLeon Romanovsky 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1344d0899892SJason Gunthorpe 	if (ret) {
1345d0899892SJason Gunthorpe 		void (*dealloc_fn)(struct ib_device *);
1346d0899892SJason Gunthorpe 
1347d0899892SJason Gunthorpe 		/*
1348d0899892SJason Gunthorpe 		 * If we hit this error flow then we don't want to
1349d0899892SJason Gunthorpe 		 * automatically dealloc the device since the caller is
1350d0899892SJason Gunthorpe 		 * expected to call ib_dealloc_device() after
1351d0899892SJason Gunthorpe 		 * ib_register_device() fails. This is tricky due to the
1352d0899892SJason Gunthorpe 		 * possibility for a parallel unregistration along with this
1353d0899892SJason Gunthorpe 		 * error flow. Since we have a refcount here we know any
1354d0899892SJason Gunthorpe 		 * parallel flow is stopped in disable_device and will see the
1355d0899892SJason Gunthorpe 		 * NULL pointers, causing the responsibility to
1356d0899892SJason Gunthorpe 		 * ib_dealloc_device() to revert back to this thread.
1357d0899892SJason Gunthorpe 		 */
1358d0899892SJason Gunthorpe 		dealloc_fn = device->ops.dealloc_driver;
1359d0899892SJason Gunthorpe 		device->ops.dealloc_driver = NULL;
1360d0899892SJason Gunthorpe 		ib_device_put(device);
1361d0899892SJason Gunthorpe 		__ib_unregister_device(device);
1362d0899892SJason Gunthorpe 		device->ops.dealloc_driver = dealloc_fn;
1363d0899892SJason Gunthorpe 		return ret;
1364d0899892SJason Gunthorpe 	}
1365d0899892SJason Gunthorpe 	ib_device_put(device);
13661da177e4SLinus Torvalds 
13674be3a4faSParav Pandit 	return 0;
13684be3a4faSParav Pandit 
13695f8f5499SParav Pandit dev_cleanup:
13705f8f5499SParav Pandit 	device_del(&device->dev);
13712fb4f4eaSParav Pandit cg_cleanup:
1372e7a5b4aaSLeon Romanovsky 	dev_set_uevent_suppress(&device->dev, false);
13732fb4f4eaSParav Pandit 	ib_device_unregister_rdmacg(device);
1374d45f89d5SJason Gunthorpe 	ib_cache_cleanup_one(device);
13751da177e4SLinus Torvalds 	return ret;
13761da177e4SLinus Torvalds }
13771da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_device);
13781da177e4SLinus Torvalds 
1379d0899892SJason Gunthorpe /* Callers must hold a get on the device. */
1380d0899892SJason Gunthorpe static void __ib_unregister_device(struct ib_device *ib_dev)
1381d0899892SJason Gunthorpe {
1382d0899892SJason Gunthorpe 	/*
1383d0899892SJason Gunthorpe 	 * We have a registration lock so that all the calls to unregister are
1384d0899892SJason Gunthorpe 	 * fully fenced, once any unregister returns the device is truely
1385d0899892SJason Gunthorpe 	 * unregistered even if multiple callers are unregistering it at the
1386d0899892SJason Gunthorpe 	 * same time. This also interacts with the registration flow and
1387d0899892SJason Gunthorpe 	 * provides sane semantics if register and unregister are racing.
1388d0899892SJason Gunthorpe 	 */
1389d0899892SJason Gunthorpe 	mutex_lock(&ib_dev->unregistration_lock);
1390d0899892SJason Gunthorpe 	if (!refcount_read(&ib_dev->refcount))
1391d0899892SJason Gunthorpe 		goto out;
1392d0899892SJason Gunthorpe 
1393d0899892SJason Gunthorpe 	disable_device(ib_dev);
13943042492bSParav Pandit 
13953042492bSParav Pandit 	/* Expedite removing unregistered pointers from the hash table */
13963042492bSParav Pandit 	free_netdevs(ib_dev);
13973042492bSParav Pandit 
1398d0899892SJason Gunthorpe 	ib_device_unregister_sysfs(ib_dev);
1399d0899892SJason Gunthorpe 	device_del(&ib_dev->dev);
1400d0899892SJason Gunthorpe 	ib_device_unregister_rdmacg(ib_dev);
1401d0899892SJason Gunthorpe 	ib_cache_cleanup_one(ib_dev);
1402d0899892SJason Gunthorpe 
1403d0899892SJason Gunthorpe 	/*
1404d0899892SJason Gunthorpe 	 * Drivers using the new flow may not call ib_dealloc_device except
1405d0899892SJason Gunthorpe 	 * in error unwind prior to registration success.
1406d0899892SJason Gunthorpe 	 */
1407d0899892SJason Gunthorpe 	if (ib_dev->ops.dealloc_driver) {
1408d0899892SJason Gunthorpe 		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1409d0899892SJason Gunthorpe 		ib_dealloc_device(ib_dev);
1410d0899892SJason Gunthorpe 	}
1411d0899892SJason Gunthorpe out:
1412d0899892SJason Gunthorpe 	mutex_unlock(&ib_dev->unregistration_lock);
1413d0899892SJason Gunthorpe }
1414d0899892SJason Gunthorpe 
14151da177e4SLinus Torvalds /**
14161da177e4SLinus Torvalds  * ib_unregister_device - Unregister an IB device
1417d0899892SJason Gunthorpe  * @device: The device to unregister
14181da177e4SLinus Torvalds  *
14191da177e4SLinus Torvalds  * Unregister an IB device.  All clients will receive a remove callback.
1420d0899892SJason Gunthorpe  *
1421d0899892SJason Gunthorpe  * Callers should call this routine only once, and protect against races with
1422d0899892SJason Gunthorpe  * registration. Typically it should only be called as part of a remove
1423d0899892SJason Gunthorpe  * callback in an implementation of driver core's struct device_driver and
1424d0899892SJason Gunthorpe  * related.
1425d0899892SJason Gunthorpe  *
1426d0899892SJason Gunthorpe  * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1427d0899892SJason Gunthorpe  * this function.
14281da177e4SLinus Torvalds  */
1429d0899892SJason Gunthorpe void ib_unregister_device(struct ib_device *ib_dev)
14301da177e4SLinus Torvalds {
1431d0899892SJason Gunthorpe 	get_device(&ib_dev->dev);
1432d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1433d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
14341da177e4SLinus Torvalds }
14351da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_device);
14361da177e4SLinus Torvalds 
1437d0899892SJason Gunthorpe /**
1438d0899892SJason Gunthorpe  * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1439d0899892SJason Gunthorpe  * device: The device to unregister
1440d0899892SJason Gunthorpe  *
1441d0899892SJason Gunthorpe  * This is the same as ib_unregister_device(), except it includes an internal
1442d0899892SJason Gunthorpe  * ib_device_put() that should match a 'get' obtained by the caller.
1443d0899892SJason Gunthorpe  *
1444d0899892SJason Gunthorpe  * It is safe to call this routine concurrently from multiple threads while
1445d0899892SJason Gunthorpe  * holding the 'get'. When the function returns the device is fully
1446d0899892SJason Gunthorpe  * unregistered.
1447d0899892SJason Gunthorpe  *
1448d0899892SJason Gunthorpe  * Drivers using this flow MUST use the driver_unregister callback to clean up
1449d0899892SJason Gunthorpe  * their resources associated with the device and dealloc it.
1450d0899892SJason Gunthorpe  */
1451d0899892SJason Gunthorpe void ib_unregister_device_and_put(struct ib_device *ib_dev)
1452d0899892SJason Gunthorpe {
1453d0899892SJason Gunthorpe 	WARN_ON(!ib_dev->ops.dealloc_driver);
1454d0899892SJason Gunthorpe 	get_device(&ib_dev->dev);
1455d0899892SJason Gunthorpe 	ib_device_put(ib_dev);
1456d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1457d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
1458d0899892SJason Gunthorpe }
1459d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_and_put);
1460d0899892SJason Gunthorpe 
1461d0899892SJason Gunthorpe /**
1462d0899892SJason Gunthorpe  * ib_unregister_driver - Unregister all IB devices for a driver
1463d0899892SJason Gunthorpe  * @driver_id: The driver to unregister
1464d0899892SJason Gunthorpe  *
1465d0899892SJason Gunthorpe  * This implements a fence for device unregistration. It only returns once all
1466d0899892SJason Gunthorpe  * devices associated with the driver_id have fully completed their
1467d0899892SJason Gunthorpe  * unregistration and returned from ib_unregister_device*().
1468d0899892SJason Gunthorpe  *
1469d0899892SJason Gunthorpe  * If device's are not yet unregistered it goes ahead and starts unregistering
1470d0899892SJason Gunthorpe  * them.
1471d0899892SJason Gunthorpe  *
1472d0899892SJason Gunthorpe  * This does not block creation of new devices with the given driver_id, that
1473d0899892SJason Gunthorpe  * is the responsibility of the caller.
1474d0899892SJason Gunthorpe  */
1475d0899892SJason Gunthorpe void ib_unregister_driver(enum rdma_driver_id driver_id)
1476d0899892SJason Gunthorpe {
1477d0899892SJason Gunthorpe 	struct ib_device *ib_dev;
1478d0899892SJason Gunthorpe 	unsigned long index;
1479d0899892SJason Gunthorpe 
1480d0899892SJason Gunthorpe 	down_read(&devices_rwsem);
1481d0899892SJason Gunthorpe 	xa_for_each (&devices, index, ib_dev) {
1482d0899892SJason Gunthorpe 		if (ib_dev->driver_id != driver_id)
1483d0899892SJason Gunthorpe 			continue;
1484d0899892SJason Gunthorpe 
1485d0899892SJason Gunthorpe 		get_device(&ib_dev->dev);
1486d0899892SJason Gunthorpe 		up_read(&devices_rwsem);
1487d0899892SJason Gunthorpe 
1488d0899892SJason Gunthorpe 		WARN_ON(!ib_dev->ops.dealloc_driver);
1489d0899892SJason Gunthorpe 		__ib_unregister_device(ib_dev);
1490d0899892SJason Gunthorpe 
1491d0899892SJason Gunthorpe 		put_device(&ib_dev->dev);
1492d0899892SJason Gunthorpe 		down_read(&devices_rwsem);
1493d0899892SJason Gunthorpe 	}
1494d0899892SJason Gunthorpe 	up_read(&devices_rwsem);
1495d0899892SJason Gunthorpe }
1496d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_driver);
1497d0899892SJason Gunthorpe 
1498d0899892SJason Gunthorpe static void ib_unregister_work(struct work_struct *work)
1499d0899892SJason Gunthorpe {
1500d0899892SJason Gunthorpe 	struct ib_device *ib_dev =
1501d0899892SJason Gunthorpe 		container_of(work, struct ib_device, unregistration_work);
1502d0899892SJason Gunthorpe 
1503d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1504d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
1505d0899892SJason Gunthorpe }
1506d0899892SJason Gunthorpe 
1507d0899892SJason Gunthorpe /**
1508d0899892SJason Gunthorpe  * ib_unregister_device_queued - Unregister a device using a work queue
1509d0899892SJason Gunthorpe  * device: The device to unregister
1510d0899892SJason Gunthorpe  *
1511d0899892SJason Gunthorpe  * This schedules an asynchronous unregistration using a WQ for the device. A
1512d0899892SJason Gunthorpe  * driver should use this to avoid holding locks while doing unregistration,
1513d0899892SJason Gunthorpe  * such as holding the RTNL lock.
1514d0899892SJason Gunthorpe  *
1515d0899892SJason Gunthorpe  * Drivers using this API must use ib_unregister_driver before module unload
1516d0899892SJason Gunthorpe  * to ensure that all scheduled unregistrations have completed.
1517d0899892SJason Gunthorpe  */
1518d0899892SJason Gunthorpe void ib_unregister_device_queued(struct ib_device *ib_dev)
1519d0899892SJason Gunthorpe {
1520d0899892SJason Gunthorpe 	WARN_ON(!refcount_read(&ib_dev->refcount));
1521d0899892SJason Gunthorpe 	WARN_ON(!ib_dev->ops.dealloc_driver);
1522d0899892SJason Gunthorpe 	get_device(&ib_dev->dev);
1523d0899892SJason Gunthorpe 	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
1524d0899892SJason Gunthorpe 		put_device(&ib_dev->dev);
1525d0899892SJason Gunthorpe }
1526d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_queued);
1527d0899892SJason Gunthorpe 
1528decbc7a6SParav Pandit /*
1529decbc7a6SParav Pandit  * The caller must pass in a device that has the kref held and the refcount
1530decbc7a6SParav Pandit  * released. If the device is in cur_net and still registered then it is moved
1531decbc7a6SParav Pandit  * into net.
1532decbc7a6SParav Pandit  */
1533decbc7a6SParav Pandit static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1534decbc7a6SParav Pandit 				 struct net *net)
1535decbc7a6SParav Pandit {
1536decbc7a6SParav Pandit 	int ret2 = -EINVAL;
1537decbc7a6SParav Pandit 	int ret;
1538decbc7a6SParav Pandit 
1539decbc7a6SParav Pandit 	mutex_lock(&device->unregistration_lock);
1540decbc7a6SParav Pandit 
1541decbc7a6SParav Pandit 	/*
15422e5b8a01SParav Pandit 	 * If a device not under ib_device_get() or if the unregistration_lock
15432e5b8a01SParav Pandit 	 * is not held, the namespace can be changed, or it can be unregistered.
15442e5b8a01SParav Pandit 	 * Check again under the lock.
1545decbc7a6SParav Pandit 	 */
1546decbc7a6SParav Pandit 	if (refcount_read(&device->refcount) == 0 ||
1547decbc7a6SParav Pandit 	    !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1548decbc7a6SParav Pandit 		ret = -ENODEV;
1549decbc7a6SParav Pandit 		goto out;
1550decbc7a6SParav Pandit 	}
1551decbc7a6SParav Pandit 
1552decbc7a6SParav Pandit 	kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1553decbc7a6SParav Pandit 	disable_device(device);
1554decbc7a6SParav Pandit 
1555decbc7a6SParav Pandit 	/*
1556decbc7a6SParav Pandit 	 * At this point no one can be using the device, so it is safe to
1557decbc7a6SParav Pandit 	 * change the namespace.
1558decbc7a6SParav Pandit 	 */
1559decbc7a6SParav Pandit 	write_pnet(&device->coredev.rdma_net, net);
1560decbc7a6SParav Pandit 
15612e5b8a01SParav Pandit 	down_read(&devices_rwsem);
1562decbc7a6SParav Pandit 	/*
1563decbc7a6SParav Pandit 	 * Currently rdma devices are system wide unique. So the device name
1564decbc7a6SParav Pandit 	 * is guaranteed free in the new namespace. Publish the new namespace
1565decbc7a6SParav Pandit 	 * at the sysfs level.
1566decbc7a6SParav Pandit 	 */
1567decbc7a6SParav Pandit 	ret = device_rename(&device->dev, dev_name(&device->dev));
1568decbc7a6SParav Pandit 	up_read(&devices_rwsem);
1569decbc7a6SParav Pandit 	if (ret) {
1570decbc7a6SParav Pandit 		dev_warn(&device->dev,
1571decbc7a6SParav Pandit 			 "%s: Couldn't rename device after namespace change\n",
1572decbc7a6SParav Pandit 			 __func__);
1573decbc7a6SParav Pandit 		/* Try and put things back and re-enable the device */
1574decbc7a6SParav Pandit 		write_pnet(&device->coredev.rdma_net, cur_net);
1575decbc7a6SParav Pandit 	}
1576decbc7a6SParav Pandit 
1577decbc7a6SParav Pandit 	ret2 = enable_device_and_get(device);
15782e5b8a01SParav Pandit 	if (ret2) {
1579decbc7a6SParav Pandit 		/*
1580decbc7a6SParav Pandit 		 * This shouldn't really happen, but if it does, let the user
1581decbc7a6SParav Pandit 		 * retry at later point. So don't disable the device.
1582decbc7a6SParav Pandit 		 */
1583decbc7a6SParav Pandit 		dev_warn(&device->dev,
1584decbc7a6SParav Pandit 			 "%s: Couldn't re-enable device after namespace change\n",
1585decbc7a6SParav Pandit 			 __func__);
15862e5b8a01SParav Pandit 	}
1587decbc7a6SParav Pandit 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
15882e5b8a01SParav Pandit 
1589decbc7a6SParav Pandit 	ib_device_put(device);
1590decbc7a6SParav Pandit out:
1591decbc7a6SParav Pandit 	mutex_unlock(&device->unregistration_lock);
1592decbc7a6SParav Pandit 	if (ret)
1593decbc7a6SParav Pandit 		return ret;
1594decbc7a6SParav Pandit 	return ret2;
1595decbc7a6SParav Pandit }
1596decbc7a6SParav Pandit 
15972e5b8a01SParav Pandit int ib_device_set_netns_put(struct sk_buff *skb,
15982e5b8a01SParav Pandit 			    struct ib_device *dev, u32 ns_fd)
15992e5b8a01SParav Pandit {
16002e5b8a01SParav Pandit 	struct net *net;
16012e5b8a01SParav Pandit 	int ret;
16022e5b8a01SParav Pandit 
16032e5b8a01SParav Pandit 	net = get_net_ns_by_fd(ns_fd);
16042e5b8a01SParav Pandit 	if (IS_ERR(net)) {
16052e5b8a01SParav Pandit 		ret = PTR_ERR(net);
16062e5b8a01SParav Pandit 		goto net_err;
16072e5b8a01SParav Pandit 	}
16082e5b8a01SParav Pandit 
16092e5b8a01SParav Pandit 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
16102e5b8a01SParav Pandit 		ret = -EPERM;
16112e5b8a01SParav Pandit 		goto ns_err;
16122e5b8a01SParav Pandit 	}
16132e5b8a01SParav Pandit 
16142e5b8a01SParav Pandit 	/*
16152e5b8a01SParav Pandit 	 * Currently supported only for those providers which support
16162e5b8a01SParav Pandit 	 * disassociation and don't do port specific sysfs init. Once a
16172e5b8a01SParav Pandit 	 * port_cleanup infrastructure is implemented, this limitation will be
16182e5b8a01SParav Pandit 	 * removed.
16192e5b8a01SParav Pandit 	 */
16202e5b8a01SParav Pandit 	if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
16212e5b8a01SParav Pandit 	    ib_devices_shared_netns) {
16222e5b8a01SParav Pandit 		ret = -EOPNOTSUPP;
16232e5b8a01SParav Pandit 		goto ns_err;
16242e5b8a01SParav Pandit 	}
16252e5b8a01SParav Pandit 
16262e5b8a01SParav Pandit 	get_device(&dev->dev);
16272e5b8a01SParav Pandit 	ib_device_put(dev);
16282e5b8a01SParav Pandit 	ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
16292e5b8a01SParav Pandit 	put_device(&dev->dev);
16302e5b8a01SParav Pandit 
16312e5b8a01SParav Pandit 	put_net(net);
16322e5b8a01SParav Pandit 	return ret;
16332e5b8a01SParav Pandit 
16342e5b8a01SParav Pandit ns_err:
16352e5b8a01SParav Pandit 	put_net(net);
16362e5b8a01SParav Pandit net_err:
16372e5b8a01SParav Pandit 	ib_device_put(dev);
16382e5b8a01SParav Pandit 	return ret;
16392e5b8a01SParav Pandit }
16402e5b8a01SParav Pandit 
16414e0f7b90SParav Pandit static struct pernet_operations rdma_dev_net_ops = {
16424e0f7b90SParav Pandit 	.init = rdma_dev_init_net,
16434e0f7b90SParav Pandit 	.exit = rdma_dev_exit_net,
16444e0f7b90SParav Pandit 	.id = &rdma_dev_net_id,
16454e0f7b90SParav Pandit 	.size = sizeof(struct rdma_dev_net),
16464e0f7b90SParav Pandit };
16474e0f7b90SParav Pandit 
1648e59178d8SJason Gunthorpe static int assign_client_id(struct ib_client *client)
1649e59178d8SJason Gunthorpe {
1650e59178d8SJason Gunthorpe 	int ret;
1651e59178d8SJason Gunthorpe 
1652921eab11SJason Gunthorpe 	down_write(&clients_rwsem);
1653e59178d8SJason Gunthorpe 	/*
1654e59178d8SJason Gunthorpe 	 * The add/remove callbacks must be called in FIFO/LIFO order. To
1655e59178d8SJason Gunthorpe 	 * achieve this we assign client_ids so they are sorted in
1656e59178d8SJason Gunthorpe 	 * registration order, and retain a linked list we can reverse iterate
1657e59178d8SJason Gunthorpe 	 * to get the LIFO order. The extra linked list can go away if xarray
1658e59178d8SJason Gunthorpe 	 * learns to reverse iterate.
1659e59178d8SJason Gunthorpe 	 */
1660ea295481SLinus Torvalds 	if (list_empty(&client_list)) {
1661e59178d8SJason Gunthorpe 		client->client_id = 0;
1662ea295481SLinus Torvalds 	} else {
1663ea295481SLinus Torvalds 		struct ib_client *last;
1664ea295481SLinus Torvalds 
1665ea295481SLinus Torvalds 		last = list_last_entry(&client_list, struct ib_client, list);
1666ea295481SLinus Torvalds 		client->client_id = last->client_id + 1;
1667ea295481SLinus Torvalds 	}
1668ea295481SLinus Torvalds 	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1669e59178d8SJason Gunthorpe 	if (ret)
1670e59178d8SJason Gunthorpe 		goto out;
1671e59178d8SJason Gunthorpe 
1672921eab11SJason Gunthorpe 	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1673921eab11SJason Gunthorpe 	list_add_tail(&client->list, &client_list);
1674921eab11SJason Gunthorpe 
1675e59178d8SJason Gunthorpe out:
1676921eab11SJason Gunthorpe 	up_write(&clients_rwsem);
1677e59178d8SJason Gunthorpe 	return ret;
1678e59178d8SJason Gunthorpe }
1679e59178d8SJason Gunthorpe 
16801da177e4SLinus Torvalds /**
16811da177e4SLinus Torvalds  * ib_register_client - Register an IB client
16821da177e4SLinus Torvalds  * @client:Client to register
16831da177e4SLinus Torvalds  *
16841da177e4SLinus Torvalds  * Upper level users of the IB drivers can use ib_register_client() to
16851da177e4SLinus Torvalds  * register callbacks for IB device addition and removal.  When an IB
16861da177e4SLinus Torvalds  * device is added, each registered client's add method will be called
16871da177e4SLinus Torvalds  * (in the order the clients were registered), and when a device is
16881da177e4SLinus Torvalds  * removed, each client's remove method will be called (in the reverse
16891da177e4SLinus Torvalds  * order that clients were registered).  In addition, when
16901da177e4SLinus Torvalds  * ib_register_client() is called, the client will receive an add
16911da177e4SLinus Torvalds  * callback for all devices already registered.
16921da177e4SLinus Torvalds  */
16931da177e4SLinus Torvalds int ib_register_client(struct ib_client *client)
16941da177e4SLinus Torvalds {
16951da177e4SLinus Torvalds 	struct ib_device *device;
16960df91bb6SJason Gunthorpe 	unsigned long index;
1697e59178d8SJason Gunthorpe 	int ret;
16981da177e4SLinus Torvalds 
1699e59178d8SJason Gunthorpe 	ret = assign_client_id(client);
1700921eab11SJason Gunthorpe 	if (ret)
1701921eab11SJason Gunthorpe 		return ret;
1702921eab11SJason Gunthorpe 
1703921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
1704921eab11SJason Gunthorpe 	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1705921eab11SJason Gunthorpe 		ret = add_client_context(device, client);
1706e59178d8SJason Gunthorpe 		if (ret) {
1707921eab11SJason Gunthorpe 			up_read(&devices_rwsem);
1708921eab11SJason Gunthorpe 			ib_unregister_client(client);
1709e59178d8SJason Gunthorpe 			return ret;
1710e59178d8SJason Gunthorpe 		}
1711921eab11SJason Gunthorpe 	}
1712921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
17131da177e4SLinus Torvalds 	return 0;
17141da177e4SLinus Torvalds }
17151da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_client);
17161da177e4SLinus Torvalds 
17171da177e4SLinus Torvalds /**
17181da177e4SLinus Torvalds  * ib_unregister_client - Unregister an IB client
17191da177e4SLinus Torvalds  * @client:Client to unregister
17201da177e4SLinus Torvalds  *
17211da177e4SLinus Torvalds  * Upper level users use ib_unregister_client() to remove their client
17221da177e4SLinus Torvalds  * registration.  When ib_unregister_client() is called, the client
17231da177e4SLinus Torvalds  * will receive a remove callback for each IB device still registered.
1724921eab11SJason Gunthorpe  *
1725921eab11SJason Gunthorpe  * This is a full fence, once it returns no client callbacks will be called,
1726921eab11SJason Gunthorpe  * or are running in another thread.
17271da177e4SLinus Torvalds  */
17281da177e4SLinus Torvalds void ib_unregister_client(struct ib_client *client)
17291da177e4SLinus Torvalds {
17301da177e4SLinus Torvalds 	struct ib_device *device;
17310df91bb6SJason Gunthorpe 	unsigned long index;
17321da177e4SLinus Torvalds 
1733921eab11SJason Gunthorpe 	down_write(&clients_rwsem);
1734e59178d8SJason Gunthorpe 	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1735921eab11SJason Gunthorpe 	up_write(&clients_rwsem);
1736921eab11SJason Gunthorpe 	/*
1737921eab11SJason Gunthorpe 	 * Every device still known must be serialized to make sure we are
1738921eab11SJason Gunthorpe 	 * done with the client callbacks before we return.
1739921eab11SJason Gunthorpe 	 */
1740921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
1741921eab11SJason Gunthorpe 	xa_for_each (&devices, index, device)
1742921eab11SJason Gunthorpe 		remove_client_context(device, client->client_id);
1743921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
17445aa44bb9SHaggai Eran 
1745921eab11SJason Gunthorpe 	down_write(&clients_rwsem);
1746e59178d8SJason Gunthorpe 	list_del(&client->list);
1747e59178d8SJason Gunthorpe 	xa_erase(&clients, client->client_id);
1748921eab11SJason Gunthorpe 	up_write(&clients_rwsem);
17491da177e4SLinus Torvalds }
17501da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_client);
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds /**
17539cd330d3SKrishna Kumar  * ib_set_client_data - Set IB client context
17541da177e4SLinus Torvalds  * @device:Device to set context for
17551da177e4SLinus Torvalds  * @client:Client to set context for
17561da177e4SLinus Torvalds  * @data:Context to set
17571da177e4SLinus Torvalds  *
17580df91bb6SJason Gunthorpe  * ib_set_client_data() sets client context data that can be retrieved with
17590df91bb6SJason Gunthorpe  * ib_get_client_data(). This can only be called while the client is
17600df91bb6SJason Gunthorpe  * registered to the device, once the ib_client remove() callback returns this
17610df91bb6SJason Gunthorpe  * cannot be called.
17621da177e4SLinus Torvalds  */
17631da177e4SLinus Torvalds void ib_set_client_data(struct ib_device *device, struct ib_client *client,
17641da177e4SLinus Torvalds 			void *data)
17651da177e4SLinus Torvalds {
17660df91bb6SJason Gunthorpe 	void *rc;
17671da177e4SLinus Torvalds 
17680df91bb6SJason Gunthorpe 	if (WARN_ON(IS_ERR(data)))
17690df91bb6SJason Gunthorpe 		data = NULL;
17701da177e4SLinus Torvalds 
17710df91bb6SJason Gunthorpe 	rc = xa_store(&device->client_data, client->client_id, data,
17720df91bb6SJason Gunthorpe 		      GFP_KERNEL);
17730df91bb6SJason Gunthorpe 	WARN_ON(xa_is_err(rc));
17741da177e4SLinus Torvalds }
17751da177e4SLinus Torvalds EXPORT_SYMBOL(ib_set_client_data);
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds /**
17781da177e4SLinus Torvalds  * ib_register_event_handler - Register an IB event handler
17791da177e4SLinus Torvalds  * @event_handler:Handler to register
17801da177e4SLinus Torvalds  *
17811da177e4SLinus Torvalds  * ib_register_event_handler() registers an event handler that will be
17821da177e4SLinus Torvalds  * called back when asynchronous IB events occur (as defined in
17831da177e4SLinus Torvalds  * chapter 11 of the InfiniBand Architecture Specification).  This
17841da177e4SLinus Torvalds  * callback may occur in interrupt context.
17851da177e4SLinus Torvalds  */
1786dcc9881eSLeon Romanovsky void ib_register_event_handler(struct ib_event_handler *event_handler)
17871da177e4SLinus Torvalds {
17881da177e4SLinus Torvalds 	unsigned long flags;
17891da177e4SLinus Torvalds 
17901da177e4SLinus Torvalds 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
17911da177e4SLinus Torvalds 	list_add_tail(&event_handler->list,
17921da177e4SLinus Torvalds 		      &event_handler->device->event_handler_list);
17931da177e4SLinus Torvalds 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
17941da177e4SLinus Torvalds }
17951da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_event_handler);
17961da177e4SLinus Torvalds 
17971da177e4SLinus Torvalds /**
17981da177e4SLinus Torvalds  * ib_unregister_event_handler - Unregister an event handler
17991da177e4SLinus Torvalds  * @event_handler:Handler to unregister
18001da177e4SLinus Torvalds  *
18011da177e4SLinus Torvalds  * Unregister an event handler registered with
18021da177e4SLinus Torvalds  * ib_register_event_handler().
18031da177e4SLinus Torvalds  */
1804dcc9881eSLeon Romanovsky void ib_unregister_event_handler(struct ib_event_handler *event_handler)
18051da177e4SLinus Torvalds {
18061da177e4SLinus Torvalds 	unsigned long flags;
18071da177e4SLinus Torvalds 
18081da177e4SLinus Torvalds 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
18091da177e4SLinus Torvalds 	list_del(&event_handler->list);
18101da177e4SLinus Torvalds 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
18111da177e4SLinus Torvalds }
18121da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_event_handler);
18131da177e4SLinus Torvalds 
18141da177e4SLinus Torvalds /**
18151da177e4SLinus Torvalds  * ib_dispatch_event - Dispatch an asynchronous event
18161da177e4SLinus Torvalds  * @event:Event to dispatch
18171da177e4SLinus Torvalds  *
18181da177e4SLinus Torvalds  * Low-level drivers must call ib_dispatch_event() to dispatch the
18191da177e4SLinus Torvalds  * event to all registered event handlers when an asynchronous event
18201da177e4SLinus Torvalds  * occurs.
18211da177e4SLinus Torvalds  */
18221da177e4SLinus Torvalds void ib_dispatch_event(struct ib_event *event)
18231da177e4SLinus Torvalds {
18241da177e4SLinus Torvalds 	unsigned long flags;
18251da177e4SLinus Torvalds 	struct ib_event_handler *handler;
18261da177e4SLinus Torvalds 
18271da177e4SLinus Torvalds 	spin_lock_irqsave(&event->device->event_handler_lock, flags);
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds 	list_for_each_entry(handler, &event->device->event_handler_list, list)
18301da177e4SLinus Torvalds 		handler->handler(handler, event);
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds 	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
18331da177e4SLinus Torvalds }
18341da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dispatch_event);
18351da177e4SLinus Torvalds 
18361da177e4SLinus Torvalds /**
18371da177e4SLinus Torvalds  * ib_query_port - Query IB port attributes
18381da177e4SLinus Torvalds  * @device:Device to query
18391da177e4SLinus Torvalds  * @port_num:Port number to query
18401da177e4SLinus Torvalds  * @port_attr:Port attributes
18411da177e4SLinus Torvalds  *
18421da177e4SLinus Torvalds  * ib_query_port() returns the attributes of a port through the
18431da177e4SLinus Torvalds  * @port_attr pointer.
18441da177e4SLinus Torvalds  */
18451da177e4SLinus Torvalds int ib_query_port(struct ib_device *device,
18461da177e4SLinus Torvalds 		  u8 port_num,
18471da177e4SLinus Torvalds 		  struct ib_port_attr *port_attr)
18481da177e4SLinus Torvalds {
1849fad61ad4SEli Cohen 	union ib_gid gid;
1850fad61ad4SEli Cohen 	int err;
1851fad61ad4SEli Cohen 
185224dc831bSYuval Shaia 	if (!rdma_is_port_valid(device, port_num))
1853116c0074SRoland Dreier 		return -EINVAL;
1854116c0074SRoland Dreier 
1855fad61ad4SEli Cohen 	memset(port_attr, 0, sizeof(*port_attr));
18563023a1e9SKamal Heib 	err = device->ops.query_port(device, port_num, port_attr);
1857fad61ad4SEli Cohen 	if (err || port_attr->subnet_prefix)
1858fad61ad4SEli Cohen 		return err;
1859fad61ad4SEli Cohen 
1860d7012467SEli Cohen 	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
1861d7012467SEli Cohen 		return 0;
1862d7012467SEli Cohen 
18633023a1e9SKamal Heib 	err = device->ops.query_gid(device, port_num, 0, &gid);
1864fad61ad4SEli Cohen 	if (err)
1865fad61ad4SEli Cohen 		return err;
1866fad61ad4SEli Cohen 
1867fad61ad4SEli Cohen 	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
1868fad61ad4SEli Cohen 	return 0;
18691da177e4SLinus Torvalds }
18701da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_port);
18711da177e4SLinus Torvalds 
1872324e227eSJason Gunthorpe static void add_ndev_hash(struct ib_port_data *pdata)
1873324e227eSJason Gunthorpe {
1874324e227eSJason Gunthorpe 	unsigned long flags;
1875324e227eSJason Gunthorpe 
1876324e227eSJason Gunthorpe 	might_sleep();
1877324e227eSJason Gunthorpe 
1878324e227eSJason Gunthorpe 	spin_lock_irqsave(&ndev_hash_lock, flags);
1879324e227eSJason Gunthorpe 	if (hash_hashed(&pdata->ndev_hash_link)) {
1880324e227eSJason Gunthorpe 		hash_del_rcu(&pdata->ndev_hash_link);
1881324e227eSJason Gunthorpe 		spin_unlock_irqrestore(&ndev_hash_lock, flags);
1882324e227eSJason Gunthorpe 		/*
1883324e227eSJason Gunthorpe 		 * We cannot do hash_add_rcu after a hash_del_rcu until the
1884324e227eSJason Gunthorpe 		 * grace period
1885324e227eSJason Gunthorpe 		 */
1886324e227eSJason Gunthorpe 		synchronize_rcu();
1887324e227eSJason Gunthorpe 		spin_lock_irqsave(&ndev_hash_lock, flags);
1888324e227eSJason Gunthorpe 	}
1889324e227eSJason Gunthorpe 	if (pdata->netdev)
1890324e227eSJason Gunthorpe 		hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
1891324e227eSJason Gunthorpe 			     (uintptr_t)pdata->netdev);
1892324e227eSJason Gunthorpe 	spin_unlock_irqrestore(&ndev_hash_lock, flags);
1893324e227eSJason Gunthorpe }
1894324e227eSJason Gunthorpe 
18951da177e4SLinus Torvalds /**
1896c2261dd7SJason Gunthorpe  * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
1897c2261dd7SJason Gunthorpe  * @ib_dev: Device to modify
1898c2261dd7SJason Gunthorpe  * @ndev: net_device to affiliate, may be NULL
1899c2261dd7SJason Gunthorpe  * @port: IB port the net_device is connected to
1900c2261dd7SJason Gunthorpe  *
1901c2261dd7SJason Gunthorpe  * Drivers should use this to link the ib_device to a netdev so the netdev
1902c2261dd7SJason Gunthorpe  * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
1903c2261dd7SJason Gunthorpe  * affiliated with any port.
1904c2261dd7SJason Gunthorpe  *
1905c2261dd7SJason Gunthorpe  * The caller must ensure that the given ndev is not unregistered or
1906c2261dd7SJason Gunthorpe  * unregistering, and that either the ib_device is unregistered or
1907c2261dd7SJason Gunthorpe  * ib_device_set_netdev() is called with NULL when the ndev sends a
1908c2261dd7SJason Gunthorpe  * NETDEV_UNREGISTER event.
1909c2261dd7SJason Gunthorpe  */
1910c2261dd7SJason Gunthorpe int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
1911c2261dd7SJason Gunthorpe 			 unsigned int port)
1912c2261dd7SJason Gunthorpe {
1913c2261dd7SJason Gunthorpe 	struct net_device *old_ndev;
1914c2261dd7SJason Gunthorpe 	struct ib_port_data *pdata;
1915c2261dd7SJason Gunthorpe 	unsigned long flags;
1916c2261dd7SJason Gunthorpe 	int ret;
1917c2261dd7SJason Gunthorpe 
1918c2261dd7SJason Gunthorpe 	/*
1919c2261dd7SJason Gunthorpe 	 * Drivers wish to call this before ib_register_driver, so we have to
1920c2261dd7SJason Gunthorpe 	 * setup the port data early.
1921c2261dd7SJason Gunthorpe 	 */
1922c2261dd7SJason Gunthorpe 	ret = alloc_port_data(ib_dev);
1923c2261dd7SJason Gunthorpe 	if (ret)
1924c2261dd7SJason Gunthorpe 		return ret;
1925c2261dd7SJason Gunthorpe 
1926c2261dd7SJason Gunthorpe 	if (!rdma_is_port_valid(ib_dev, port))
1927c2261dd7SJason Gunthorpe 		return -EINVAL;
1928c2261dd7SJason Gunthorpe 
1929c2261dd7SJason Gunthorpe 	pdata = &ib_dev->port_data[port];
1930c2261dd7SJason Gunthorpe 	spin_lock_irqsave(&pdata->netdev_lock, flags);
1931324e227eSJason Gunthorpe 	old_ndev = rcu_dereference_protected(
1932324e227eSJason Gunthorpe 		pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1933324e227eSJason Gunthorpe 	if (old_ndev == ndev) {
1934c2261dd7SJason Gunthorpe 		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1935c2261dd7SJason Gunthorpe 		return 0;
1936c2261dd7SJason Gunthorpe 	}
1937c2261dd7SJason Gunthorpe 
1938c2261dd7SJason Gunthorpe 	if (ndev)
1939c2261dd7SJason Gunthorpe 		dev_hold(ndev);
1940324e227eSJason Gunthorpe 	rcu_assign_pointer(pdata->netdev, ndev);
1941c2261dd7SJason Gunthorpe 	spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1942c2261dd7SJason Gunthorpe 
1943324e227eSJason Gunthorpe 	add_ndev_hash(pdata);
1944c2261dd7SJason Gunthorpe 	if (old_ndev)
1945c2261dd7SJason Gunthorpe 		dev_put(old_ndev);
1946c2261dd7SJason Gunthorpe 
1947c2261dd7SJason Gunthorpe 	return 0;
1948c2261dd7SJason Gunthorpe }
1949c2261dd7SJason Gunthorpe EXPORT_SYMBOL(ib_device_set_netdev);
1950c2261dd7SJason Gunthorpe 
1951c2261dd7SJason Gunthorpe static void free_netdevs(struct ib_device *ib_dev)
1952c2261dd7SJason Gunthorpe {
1953c2261dd7SJason Gunthorpe 	unsigned long flags;
1954c2261dd7SJason Gunthorpe 	unsigned int port;
1955c2261dd7SJason Gunthorpe 
195646bdf370SKamal Heib 	if (!ib_dev->port_data)
195746bdf370SKamal Heib 		return;
195846bdf370SKamal Heib 
1959c2261dd7SJason Gunthorpe 	rdma_for_each_port (ib_dev, port) {
1960c2261dd7SJason Gunthorpe 		struct ib_port_data *pdata = &ib_dev->port_data[port];
1961324e227eSJason Gunthorpe 		struct net_device *ndev;
1962c2261dd7SJason Gunthorpe 
1963c2261dd7SJason Gunthorpe 		spin_lock_irqsave(&pdata->netdev_lock, flags);
1964324e227eSJason Gunthorpe 		ndev = rcu_dereference_protected(
1965324e227eSJason Gunthorpe 			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1966324e227eSJason Gunthorpe 		if (ndev) {
1967324e227eSJason Gunthorpe 			spin_lock(&ndev_hash_lock);
1968324e227eSJason Gunthorpe 			hash_del_rcu(&pdata->ndev_hash_link);
1969324e227eSJason Gunthorpe 			spin_unlock(&ndev_hash_lock);
1970324e227eSJason Gunthorpe 
1971324e227eSJason Gunthorpe 			/*
1972324e227eSJason Gunthorpe 			 * If this is the last dev_put there is still a
1973324e227eSJason Gunthorpe 			 * synchronize_rcu before the netdev is kfreed, so we
1974324e227eSJason Gunthorpe 			 * can continue to rely on unlocked pointer
1975324e227eSJason Gunthorpe 			 * comparisons after the put
1976324e227eSJason Gunthorpe 			 */
1977324e227eSJason Gunthorpe 			rcu_assign_pointer(pdata->netdev, NULL);
1978324e227eSJason Gunthorpe 			dev_put(ndev);
1979c2261dd7SJason Gunthorpe 		}
1980c2261dd7SJason Gunthorpe 		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1981c2261dd7SJason Gunthorpe 	}
1982c2261dd7SJason Gunthorpe }
1983c2261dd7SJason Gunthorpe 
1984c2261dd7SJason Gunthorpe struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
1985c2261dd7SJason Gunthorpe 					unsigned int port)
1986c2261dd7SJason Gunthorpe {
1987c2261dd7SJason Gunthorpe 	struct ib_port_data *pdata;
1988c2261dd7SJason Gunthorpe 	struct net_device *res;
1989c2261dd7SJason Gunthorpe 
1990c2261dd7SJason Gunthorpe 	if (!rdma_is_port_valid(ib_dev, port))
1991c2261dd7SJason Gunthorpe 		return NULL;
1992c2261dd7SJason Gunthorpe 
1993c2261dd7SJason Gunthorpe 	pdata = &ib_dev->port_data[port];
1994c2261dd7SJason Gunthorpe 
1995c2261dd7SJason Gunthorpe 	/*
1996c2261dd7SJason Gunthorpe 	 * New drivers should use ib_device_set_netdev() not the legacy
1997c2261dd7SJason Gunthorpe 	 * get_netdev().
1998c2261dd7SJason Gunthorpe 	 */
1999c2261dd7SJason Gunthorpe 	if (ib_dev->ops.get_netdev)
2000c2261dd7SJason Gunthorpe 		res = ib_dev->ops.get_netdev(ib_dev, port);
2001c2261dd7SJason Gunthorpe 	else {
2002c2261dd7SJason Gunthorpe 		spin_lock(&pdata->netdev_lock);
2003324e227eSJason Gunthorpe 		res = rcu_dereference_protected(
2004324e227eSJason Gunthorpe 			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2005c2261dd7SJason Gunthorpe 		if (res)
2006c2261dd7SJason Gunthorpe 			dev_hold(res);
2007c2261dd7SJason Gunthorpe 		spin_unlock(&pdata->netdev_lock);
2008c2261dd7SJason Gunthorpe 	}
2009c2261dd7SJason Gunthorpe 
2010c2261dd7SJason Gunthorpe 	/*
2011c2261dd7SJason Gunthorpe 	 * If we are starting to unregister expedite things by preventing
2012c2261dd7SJason Gunthorpe 	 * propagation of an unregistering netdev.
2013c2261dd7SJason Gunthorpe 	 */
2014c2261dd7SJason Gunthorpe 	if (res && res->reg_state != NETREG_REGISTERED) {
2015c2261dd7SJason Gunthorpe 		dev_put(res);
2016c2261dd7SJason Gunthorpe 		return NULL;
2017c2261dd7SJason Gunthorpe 	}
2018c2261dd7SJason Gunthorpe 
2019c2261dd7SJason Gunthorpe 	return res;
2020c2261dd7SJason Gunthorpe }
2021c2261dd7SJason Gunthorpe 
2022c2261dd7SJason Gunthorpe /**
2023324e227eSJason Gunthorpe  * ib_device_get_by_netdev - Find an IB device associated with a netdev
2024324e227eSJason Gunthorpe  * @ndev: netdev to locate
2025324e227eSJason Gunthorpe  * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2026324e227eSJason Gunthorpe  *
2027324e227eSJason Gunthorpe  * Find and hold an ib_device that is associated with a netdev via
2028324e227eSJason Gunthorpe  * ib_device_set_netdev(). The caller must call ib_device_put() on the
2029324e227eSJason Gunthorpe  * returned pointer.
2030324e227eSJason Gunthorpe  */
2031324e227eSJason Gunthorpe struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2032324e227eSJason Gunthorpe 					  enum rdma_driver_id driver_id)
2033324e227eSJason Gunthorpe {
2034324e227eSJason Gunthorpe 	struct ib_device *res = NULL;
2035324e227eSJason Gunthorpe 	struct ib_port_data *cur;
2036324e227eSJason Gunthorpe 
2037324e227eSJason Gunthorpe 	rcu_read_lock();
2038324e227eSJason Gunthorpe 	hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2039324e227eSJason Gunthorpe 				    (uintptr_t)ndev) {
2040324e227eSJason Gunthorpe 		if (rcu_access_pointer(cur->netdev) == ndev &&
2041324e227eSJason Gunthorpe 		    (driver_id == RDMA_DRIVER_UNKNOWN ||
2042324e227eSJason Gunthorpe 		     cur->ib_dev->driver_id == driver_id) &&
2043324e227eSJason Gunthorpe 		    ib_device_try_get(cur->ib_dev)) {
2044324e227eSJason Gunthorpe 			res = cur->ib_dev;
2045324e227eSJason Gunthorpe 			break;
2046324e227eSJason Gunthorpe 		}
2047324e227eSJason Gunthorpe 	}
2048324e227eSJason Gunthorpe 	rcu_read_unlock();
2049324e227eSJason Gunthorpe 
2050324e227eSJason Gunthorpe 	return res;
2051324e227eSJason Gunthorpe }
2052324e227eSJason Gunthorpe EXPORT_SYMBOL(ib_device_get_by_netdev);
2053324e227eSJason Gunthorpe 
2054324e227eSJason Gunthorpe /**
205503db3a2dSMatan Barak  * ib_enum_roce_netdev - enumerate all RoCE ports
205603db3a2dSMatan Barak  * @ib_dev : IB device we want to query
205703db3a2dSMatan Barak  * @filter: Should we call the callback?
205803db3a2dSMatan Barak  * @filter_cookie: Cookie passed to filter
205903db3a2dSMatan Barak  * @cb: Callback to call for each found RoCE ports
206003db3a2dSMatan Barak  * @cookie: Cookie passed back to the callback
206103db3a2dSMatan Barak  *
206203db3a2dSMatan Barak  * Enumerates all of the physical RoCE ports of ib_dev
206303db3a2dSMatan Barak  * which are related to netdevice and calls callback() on each
206403db3a2dSMatan Barak  * device for which filter() function returns non zero.
206503db3a2dSMatan Barak  */
206603db3a2dSMatan Barak void ib_enum_roce_netdev(struct ib_device *ib_dev,
206703db3a2dSMatan Barak 			 roce_netdev_filter filter,
206803db3a2dSMatan Barak 			 void *filter_cookie,
206903db3a2dSMatan Barak 			 roce_netdev_callback cb,
207003db3a2dSMatan Barak 			 void *cookie)
207103db3a2dSMatan Barak {
2072ea1075edSJason Gunthorpe 	unsigned int port;
207303db3a2dSMatan Barak 
2074ea1075edSJason Gunthorpe 	rdma_for_each_port (ib_dev, port)
207503db3a2dSMatan Barak 		if (rdma_protocol_roce(ib_dev, port)) {
2076c2261dd7SJason Gunthorpe 			struct net_device *idev =
2077c2261dd7SJason Gunthorpe 				ib_device_get_netdev(ib_dev, port);
207803db3a2dSMatan Barak 
207903db3a2dSMatan Barak 			if (filter(ib_dev, port, idev, filter_cookie))
208003db3a2dSMatan Barak 				cb(ib_dev, port, idev, cookie);
208103db3a2dSMatan Barak 
208203db3a2dSMatan Barak 			if (idev)
208303db3a2dSMatan Barak 				dev_put(idev);
208403db3a2dSMatan Barak 		}
208503db3a2dSMatan Barak }
208603db3a2dSMatan Barak 
208703db3a2dSMatan Barak /**
208803db3a2dSMatan Barak  * ib_enum_all_roce_netdevs - enumerate all RoCE devices
208903db3a2dSMatan Barak  * @filter: Should we call the callback?
209003db3a2dSMatan Barak  * @filter_cookie: Cookie passed to filter
209103db3a2dSMatan Barak  * @cb: Callback to call for each found RoCE ports
209203db3a2dSMatan Barak  * @cookie: Cookie passed back to the callback
209303db3a2dSMatan Barak  *
209403db3a2dSMatan Barak  * Enumerates all RoCE devices' physical ports which are related
209503db3a2dSMatan Barak  * to netdevices and calls callback() on each device for which
209603db3a2dSMatan Barak  * filter() function returns non zero.
209703db3a2dSMatan Barak  */
209803db3a2dSMatan Barak void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
209903db3a2dSMatan Barak 			      void *filter_cookie,
210003db3a2dSMatan Barak 			      roce_netdev_callback cb,
210103db3a2dSMatan Barak 			      void *cookie)
210203db3a2dSMatan Barak {
210303db3a2dSMatan Barak 	struct ib_device *dev;
21040df91bb6SJason Gunthorpe 	unsigned long index;
210503db3a2dSMatan Barak 
2106921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
21070df91bb6SJason Gunthorpe 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
210803db3a2dSMatan Barak 		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
2109921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
211003db3a2dSMatan Barak }
211103db3a2dSMatan Barak 
211203db3a2dSMatan Barak /**
21138030c835SLeon Romanovsky  * ib_enum_all_devs - enumerate all ib_devices
21148030c835SLeon Romanovsky  * @cb: Callback to call for each found ib_device
21158030c835SLeon Romanovsky  *
21168030c835SLeon Romanovsky  * Enumerates all ib_devices and calls callback() on each device.
21178030c835SLeon Romanovsky  */
21188030c835SLeon Romanovsky int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
21198030c835SLeon Romanovsky 		     struct netlink_callback *cb)
21208030c835SLeon Romanovsky {
21210df91bb6SJason Gunthorpe 	unsigned long index;
21228030c835SLeon Romanovsky 	struct ib_device *dev;
21238030c835SLeon Romanovsky 	unsigned int idx = 0;
21248030c835SLeon Romanovsky 	int ret = 0;
21258030c835SLeon Romanovsky 
2126921eab11SJason Gunthorpe 	down_read(&devices_rwsem);
21270df91bb6SJason Gunthorpe 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
212837eeab55SParav Pandit 		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
212937eeab55SParav Pandit 			continue;
213037eeab55SParav Pandit 
21318030c835SLeon Romanovsky 		ret = nldev_cb(dev, skb, cb, idx);
21328030c835SLeon Romanovsky 		if (ret)
21338030c835SLeon Romanovsky 			break;
21348030c835SLeon Romanovsky 		idx++;
21358030c835SLeon Romanovsky 	}
2136921eab11SJason Gunthorpe 	up_read(&devices_rwsem);
21378030c835SLeon Romanovsky 	return ret;
21388030c835SLeon Romanovsky }
21398030c835SLeon Romanovsky 
21408030c835SLeon Romanovsky /**
21411da177e4SLinus Torvalds  * ib_query_pkey - Get P_Key table entry
21421da177e4SLinus Torvalds  * @device:Device to query
21431da177e4SLinus Torvalds  * @port_num:Port number to query
21441da177e4SLinus Torvalds  * @index:P_Key table index to query
21451da177e4SLinus Torvalds  * @pkey:Returned P_Key
21461da177e4SLinus Torvalds  *
21471da177e4SLinus Torvalds  * ib_query_pkey() fetches the specified P_Key table entry.
21481da177e4SLinus Torvalds  */
21491da177e4SLinus Torvalds int ib_query_pkey(struct ib_device *device,
21501da177e4SLinus Torvalds 		  u8 port_num, u16 index, u16 *pkey)
21511da177e4SLinus Torvalds {
21529af3f5cfSYuval Shaia 	if (!rdma_is_port_valid(device, port_num))
21539af3f5cfSYuval Shaia 		return -EINVAL;
21549af3f5cfSYuval Shaia 
21553023a1e9SKamal Heib 	return device->ops.query_pkey(device, port_num, index, pkey);
21561da177e4SLinus Torvalds }
21571da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_pkey);
21581da177e4SLinus Torvalds 
21591da177e4SLinus Torvalds /**
21601da177e4SLinus Torvalds  * ib_modify_device - Change IB device attributes
21611da177e4SLinus Torvalds  * @device:Device to modify
21621da177e4SLinus Torvalds  * @device_modify_mask:Mask of attributes to change
21631da177e4SLinus Torvalds  * @device_modify:New attribute values
21641da177e4SLinus Torvalds  *
21651da177e4SLinus Torvalds  * ib_modify_device() changes a device's attributes as specified by
21661da177e4SLinus Torvalds  * the @device_modify_mask and @device_modify structure.
21671da177e4SLinus Torvalds  */
21681da177e4SLinus Torvalds int ib_modify_device(struct ib_device *device,
21691da177e4SLinus Torvalds 		     int device_modify_mask,
21701da177e4SLinus Torvalds 		     struct ib_device_modify *device_modify)
21711da177e4SLinus Torvalds {
21723023a1e9SKamal Heib 	if (!device->ops.modify_device)
217310e1b54bSBart Van Assche 		return -ENOSYS;
217410e1b54bSBart Van Assche 
21753023a1e9SKamal Heib 	return device->ops.modify_device(device, device_modify_mask,
21761da177e4SLinus Torvalds 					 device_modify);
21771da177e4SLinus Torvalds }
21781da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_device);
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds /**
21811da177e4SLinus Torvalds  * ib_modify_port - Modifies the attributes for the specified port.
21821da177e4SLinus Torvalds  * @device: The device to modify.
21831da177e4SLinus Torvalds  * @port_num: The number of the port to modify.
21841da177e4SLinus Torvalds  * @port_modify_mask: Mask used to specify which attributes of the port
21851da177e4SLinus Torvalds  *   to change.
21861da177e4SLinus Torvalds  * @port_modify: New attribute values for the port.
21871da177e4SLinus Torvalds  *
21881da177e4SLinus Torvalds  * ib_modify_port() changes a port's attributes as specified by the
21891da177e4SLinus Torvalds  * @port_modify_mask and @port_modify structure.
21901da177e4SLinus Torvalds  */
21911da177e4SLinus Torvalds int ib_modify_port(struct ib_device *device,
21921da177e4SLinus Torvalds 		   u8 port_num, int port_modify_mask,
21931da177e4SLinus Torvalds 		   struct ib_port_modify *port_modify)
21941da177e4SLinus Torvalds {
219561e0962dSSelvin Xavier 	int rc;
219610e1b54bSBart Van Assche 
219724dc831bSYuval Shaia 	if (!rdma_is_port_valid(device, port_num))
2198116c0074SRoland Dreier 		return -EINVAL;
2199116c0074SRoland Dreier 
22003023a1e9SKamal Heib 	if (device->ops.modify_port)
22013023a1e9SKamal Heib 		rc = device->ops.modify_port(device, port_num,
22023023a1e9SKamal Heib 					     port_modify_mask,
22031da177e4SLinus Torvalds 					     port_modify);
220461e0962dSSelvin Xavier 	else
220561e0962dSSelvin Xavier 		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
220661e0962dSSelvin Xavier 	return rc;
22071da177e4SLinus Torvalds }
22081da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_port);
22091da177e4SLinus Torvalds 
22105eb620c8SYosef Etigin /**
22115eb620c8SYosef Etigin  * ib_find_gid - Returns the port number and GID table index where
2212dbb12562SParav Pandit  *   a specified GID value occurs. Its searches only for IB link layer.
22135eb620c8SYosef Etigin  * @device: The device to query.
22145eb620c8SYosef Etigin  * @gid: The GID value to search for.
22155eb620c8SYosef Etigin  * @port_num: The port number of the device where the GID value was found.
22165eb620c8SYosef Etigin  * @index: The index into the GID table where the GID was found.  This
22175eb620c8SYosef Etigin  *   parameter may be NULL.
22185eb620c8SYosef Etigin  */
22195eb620c8SYosef Etigin int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2220b26c4a11SParav Pandit 		u8 *port_num, u16 *index)
22215eb620c8SYosef Etigin {
22225eb620c8SYosef Etigin 	union ib_gid tmp_gid;
2223ea1075edSJason Gunthorpe 	unsigned int port;
2224ea1075edSJason Gunthorpe 	int ret, i;
22255eb620c8SYosef Etigin 
2226ea1075edSJason Gunthorpe 	rdma_for_each_port (device, port) {
222722d24f75SParav Pandit 		if (!rdma_protocol_ib(device, port))
2228b39ffa1dSMatan Barak 			continue;
2229b39ffa1dSMatan Barak 
22308ceb1357SJason Gunthorpe 		for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
22318ceb1357SJason Gunthorpe 		     ++i) {
22321dfce294SParav Pandit 			ret = rdma_query_gid(device, port, i, &tmp_gid);
22335eb620c8SYosef Etigin 			if (ret)
22345eb620c8SYosef Etigin 				return ret;
22355eb620c8SYosef Etigin 			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
22365eb620c8SYosef Etigin 				*port_num = port;
22375eb620c8SYosef Etigin 				if (index)
22385eb620c8SYosef Etigin 					*index = i;
22395eb620c8SYosef Etigin 				return 0;
22405eb620c8SYosef Etigin 			}
22415eb620c8SYosef Etigin 		}
22425eb620c8SYosef Etigin 	}
22435eb620c8SYosef Etigin 
22445eb620c8SYosef Etigin 	return -ENOENT;
22455eb620c8SYosef Etigin }
22465eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_gid);
22475eb620c8SYosef Etigin 
22485eb620c8SYosef Etigin /**
22495eb620c8SYosef Etigin  * ib_find_pkey - Returns the PKey table index where a specified
22505eb620c8SYosef Etigin  *   PKey value occurs.
22515eb620c8SYosef Etigin  * @device: The device to query.
22525eb620c8SYosef Etigin  * @port_num: The port number of the device to search for the PKey.
22535eb620c8SYosef Etigin  * @pkey: The PKey value to search for.
22545eb620c8SYosef Etigin  * @index: The index into the PKey table where the PKey was found.
22555eb620c8SYosef Etigin  */
22565eb620c8SYosef Etigin int ib_find_pkey(struct ib_device *device,
22575eb620c8SYosef Etigin 		 u8 port_num, u16 pkey, u16 *index)
22585eb620c8SYosef Etigin {
22595eb620c8SYosef Etigin 	int ret, i;
22605eb620c8SYosef Etigin 	u16 tmp_pkey;
2261ff7166c4SJack Morgenstein 	int partial_ix = -1;
22625eb620c8SYosef Etigin 
22638ceb1357SJason Gunthorpe 	for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
22648ceb1357SJason Gunthorpe 	     ++i) {
22655eb620c8SYosef Etigin 		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
22665eb620c8SYosef Etigin 		if (ret)
22675eb620c8SYosef Etigin 			return ret;
226836026eccSMoni Shoua 		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
2269ff7166c4SJack Morgenstein 			/* if there is full-member pkey take it.*/
2270ff7166c4SJack Morgenstein 			if (tmp_pkey & 0x8000) {
22715eb620c8SYosef Etigin 				*index = i;
22725eb620c8SYosef Etigin 				return 0;
22735eb620c8SYosef Etigin 			}
2274ff7166c4SJack Morgenstein 			if (partial_ix < 0)
2275ff7166c4SJack Morgenstein 				partial_ix = i;
2276ff7166c4SJack Morgenstein 		}
22775eb620c8SYosef Etigin 	}
22785eb620c8SYosef Etigin 
2279ff7166c4SJack Morgenstein 	/*no full-member, if exists take the limited*/
2280ff7166c4SJack Morgenstein 	if (partial_ix >= 0) {
2281ff7166c4SJack Morgenstein 		*index = partial_ix;
2282ff7166c4SJack Morgenstein 		return 0;
2283ff7166c4SJack Morgenstein 	}
22845eb620c8SYosef Etigin 	return -ENOENT;
22855eb620c8SYosef Etigin }
22865eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_pkey);
22875eb620c8SYosef Etigin 
22889268f72dSYotam Kenneth /**
22899268f72dSYotam Kenneth  * ib_get_net_dev_by_params() - Return the appropriate net_dev
22909268f72dSYotam Kenneth  * for a received CM request
22919268f72dSYotam Kenneth  * @dev:	An RDMA device on which the request has been received.
22929268f72dSYotam Kenneth  * @port:	Port number on the RDMA device.
22939268f72dSYotam Kenneth  * @pkey:	The Pkey the request came on.
22949268f72dSYotam Kenneth  * @gid:	A GID that the net_dev uses to communicate.
22959268f72dSYotam Kenneth  * @addr:	Contains the IP address that the request specified as its
22969268f72dSYotam Kenneth  *		destination.
2297921eab11SJason Gunthorpe  *
22989268f72dSYotam Kenneth  */
22999268f72dSYotam Kenneth struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
23009268f72dSYotam Kenneth 					    u8 port,
23019268f72dSYotam Kenneth 					    u16 pkey,
23029268f72dSYotam Kenneth 					    const union ib_gid *gid,
23039268f72dSYotam Kenneth 					    const struct sockaddr *addr)
23049268f72dSYotam Kenneth {
23059268f72dSYotam Kenneth 	struct net_device *net_dev = NULL;
23060df91bb6SJason Gunthorpe 	unsigned long index;
23070df91bb6SJason Gunthorpe 	void *client_data;
23089268f72dSYotam Kenneth 
23099268f72dSYotam Kenneth 	if (!rdma_protocol_ib(dev, port))
23109268f72dSYotam Kenneth 		return NULL;
23119268f72dSYotam Kenneth 
2312921eab11SJason Gunthorpe 	/*
2313921eab11SJason Gunthorpe 	 * Holding the read side guarantees that the client will not become
2314921eab11SJason Gunthorpe 	 * unregistered while we are calling get_net_dev_by_params()
2315921eab11SJason Gunthorpe 	 */
2316921eab11SJason Gunthorpe 	down_read(&dev->client_data_rwsem);
23170df91bb6SJason Gunthorpe 	xan_for_each_marked (&dev->client_data, index, client_data,
23180df91bb6SJason Gunthorpe 			     CLIENT_DATA_REGISTERED) {
23190df91bb6SJason Gunthorpe 		struct ib_client *client = xa_load(&clients, index);
23209268f72dSYotam Kenneth 
23210df91bb6SJason Gunthorpe 		if (!client || !client->get_net_dev_by_params)
23229268f72dSYotam Kenneth 			continue;
23239268f72dSYotam Kenneth 
23240df91bb6SJason Gunthorpe 		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
23250df91bb6SJason Gunthorpe 							addr, client_data);
23269268f72dSYotam Kenneth 		if (net_dev)
23279268f72dSYotam Kenneth 			break;
23289268f72dSYotam Kenneth 	}
2329921eab11SJason Gunthorpe 	up_read(&dev->client_data_rwsem);
23309268f72dSYotam Kenneth 
23319268f72dSYotam Kenneth 	return net_dev;
23329268f72dSYotam Kenneth }
23339268f72dSYotam Kenneth EXPORT_SYMBOL(ib_get_net_dev_by_params);
23349268f72dSYotam Kenneth 
2335521ed0d9SKamal Heib void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
2336521ed0d9SKamal Heib {
23373023a1e9SKamal Heib 	struct ib_device_ops *dev_ops = &dev->ops;
2338521ed0d9SKamal Heib #define SET_DEVICE_OP(ptr, name)                                               \
2339521ed0d9SKamal Heib 	do {                                                                   \
2340521ed0d9SKamal Heib 		if (ops->name)                                                 \
2341521ed0d9SKamal Heib 			if (!((ptr)->name))				       \
2342521ed0d9SKamal Heib 				(ptr)->name = ops->name;                       \
2343521ed0d9SKamal Heib 	} while (0)
2344521ed0d9SKamal Heib 
234530471d4bSLeon Romanovsky #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
234630471d4bSLeon Romanovsky 
23473023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, add_gid);
23482f1927b0SMoni Shoua 	SET_DEVICE_OP(dev_ops, advise_mr);
23493023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_dm);
23503023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_fmr);
23513023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
23523023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_mr);
23533023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_mw);
23543023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_pd);
23553023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
23563023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_ucontext);
23573023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, alloc_xrcd);
23583023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, attach_mcast);
23593023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, check_mr_status);
23603023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_ah);
23613023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_counters);
23623023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_cq);
23633023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_flow);
23643023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
23653023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_qp);
23663023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
23673023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_srq);
23683023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, create_wq);
23693023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_dm);
2370d0899892SJason Gunthorpe 	SET_DEVICE_OP(dev_ops, dealloc_driver);
23713023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_fmr);
23723023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_mw);
23733023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_pd);
23743023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
23753023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
23763023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, del_gid);
23773023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, dereg_mr);
23783023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_ah);
23793023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_counters);
23803023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_cq);
23813023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_flow);
23823023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_flow_action);
23833023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_qp);
23843023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
23853023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_srq);
23863023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, destroy_wq);
23873023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, detach_mcast);
23883023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
23893023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, drain_rq);
23903023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, drain_sq);
2391ca22354bSJason Gunthorpe 	SET_DEVICE_OP(dev_ops, enable_driver);
239202da3750SLeon Romanovsky 	SET_DEVICE_OP(dev_ops, fill_res_entry);
23933023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
23943023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_dma_mr);
23953023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_hw_stats);
23963023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_link_layer);
23973023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_netdev);
23983023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_port_immutable);
23993023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_vector_affinity);
24003023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_vf_config);
24013023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, get_vf_stats);
2402ea4baf7fSParav Pandit 	SET_DEVICE_OP(dev_ops, init_port);
2403dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_accept);
2404dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_add_ref);
2405dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_connect);
2406dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_create_listen);
2407dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2408dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_get_qp);
2409dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_reject);
2410dd05cb82SKamal Heib 	SET_DEVICE_OP(dev_ops, iw_rem_ref);
24113023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, map_mr_sg);
24123023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, map_phys_fmr);
24133023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, mmap);
24143023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_ah);
24153023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_cq);
24163023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_device);
24173023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
24183023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_port);
24193023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_qp);
24203023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_srq);
24213023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, modify_wq);
24223023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, peek_cq);
24233023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, poll_cq);
24243023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, post_recv);
24253023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, post_send);
24263023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, post_srq_recv);
24273023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, process_mad);
24283023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_ah);
24293023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_device);
24303023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_gid);
24313023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_pkey);
24323023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_port);
24333023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_qp);
24343023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, query_srq);
24353023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
24363023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, read_counters);
24373023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, reg_dm_mr);
24383023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, reg_user_mr);
24393023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
24403023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, req_notify_cq);
24413023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, rereg_user_mr);
24423023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, resize_cq);
24433023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, set_vf_guid);
24443023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, set_vf_link_state);
24453023a1e9SKamal Heib 	SET_DEVICE_OP(dev_ops, unmap_fmr);
244621a428a0SLeon Romanovsky 
2447d3456914SLeon Romanovsky 	SET_OBJ_SIZE(dev_ops, ib_ah);
244821a428a0SLeon Romanovsky 	SET_OBJ_SIZE(dev_ops, ib_pd);
244968e326deSLeon Romanovsky 	SET_OBJ_SIZE(dev_ops, ib_srq);
2450a2a074efSLeon Romanovsky 	SET_OBJ_SIZE(dev_ops, ib_ucontext);
2451521ed0d9SKamal Heib }
2452521ed0d9SKamal Heib EXPORT_SYMBOL(ib_set_device_ops);
2453521ed0d9SKamal Heib 
2454d0e312feSLeon Romanovsky static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
2455735c631aSMark Bloch 	[RDMA_NL_LS_OP_RESOLVE] = {
2456647c75acSLeon Romanovsky 		.doit = ib_nl_handle_resolve_resp,
2457e3a2b93dSLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
2458e3a2b93dSLeon Romanovsky 	},
2459735c631aSMark Bloch 	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
2460647c75acSLeon Romanovsky 		.doit = ib_nl_handle_set_timeout,
2461e3a2b93dSLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
2462e3a2b93dSLeon Romanovsky 	},
2463ae43f828SMark Bloch 	[RDMA_NL_LS_OP_IP_RESOLVE] = {
2464647c75acSLeon Romanovsky 		.doit = ib_nl_handle_ip_res_resp,
2465e3a2b93dSLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
2466e3a2b93dSLeon Romanovsky 	},
2467735c631aSMark Bloch };
2468735c631aSMark Bloch 
24691da177e4SLinus Torvalds static int __init ib_core_init(void)
24701da177e4SLinus Torvalds {
24711da177e4SLinus Torvalds 	int ret;
24721da177e4SLinus Torvalds 
2473f0626710STejun Heo 	ib_wq = alloc_workqueue("infiniband", 0, 0);
2474f0626710STejun Heo 	if (!ib_wq)
2475f0626710STejun Heo 		return -ENOMEM;
2476f0626710STejun Heo 
247714d3a3b2SChristoph Hellwig 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
2478b7363e67SSagi Grimberg 			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
247914d3a3b2SChristoph Hellwig 	if (!ib_comp_wq) {
248014d3a3b2SChristoph Hellwig 		ret = -ENOMEM;
248114d3a3b2SChristoph Hellwig 		goto err;
248214d3a3b2SChristoph Hellwig 	}
248314d3a3b2SChristoph Hellwig 
2484f794809aSJack Morgenstein 	ib_comp_unbound_wq =
2485f794809aSJack Morgenstein 		alloc_workqueue("ib-comp-unb-wq",
2486f794809aSJack Morgenstein 				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2487f794809aSJack Morgenstein 				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2488f794809aSJack Morgenstein 	if (!ib_comp_unbound_wq) {
2489f794809aSJack Morgenstein 		ret = -ENOMEM;
2490f794809aSJack Morgenstein 		goto err_comp;
2491f794809aSJack Morgenstein 	}
2492f794809aSJack Morgenstein 
249355aeed06SJason Gunthorpe 	ret = class_register(&ib_class);
2494fd75c789SNir Muchtar 	if (ret) {
2495aba25a3eSParav Pandit 		pr_warn("Couldn't create InfiniBand device class\n");
2496f794809aSJack Morgenstein 		goto err_comp_unbound;
2497fd75c789SNir Muchtar 	}
24981da177e4SLinus Torvalds 
2499c9901724SLeon Romanovsky 	ret = rdma_nl_init();
25001da177e4SLinus Torvalds 	if (ret) {
2501c9901724SLeon Romanovsky 		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
2502fd75c789SNir Muchtar 		goto err_sysfs;
25031da177e4SLinus Torvalds 	}
25041da177e4SLinus Torvalds 
2505e3f20f02SLeon Romanovsky 	ret = addr_init();
2506e3f20f02SLeon Romanovsky 	if (ret) {
2507e3f20f02SLeon Romanovsky 		pr_warn("Could't init IB address resolution\n");
2508e3f20f02SLeon Romanovsky 		goto err_ibnl;
2509e3f20f02SLeon Romanovsky 	}
2510e3f20f02SLeon Romanovsky 
25114c2cb422SMark Bloch 	ret = ib_mad_init();
25124c2cb422SMark Bloch 	if (ret) {
25134c2cb422SMark Bloch 		pr_warn("Couldn't init IB MAD\n");
25144c2cb422SMark Bloch 		goto err_addr;
25154c2cb422SMark Bloch 	}
25164c2cb422SMark Bloch 
2517c2e49c92SMark Bloch 	ret = ib_sa_init();
2518c2e49c92SMark Bloch 	if (ret) {
2519c2e49c92SMark Bloch 		pr_warn("Couldn't init SA\n");
2520c2e49c92SMark Bloch 		goto err_mad;
2521c2e49c92SMark Bloch 	}
2522c2e49c92SMark Bloch 
25238f408ab6SDaniel Jurgens 	ret = register_lsm_notifier(&ibdev_lsm_nb);
25248f408ab6SDaniel Jurgens 	if (ret) {
25258f408ab6SDaniel Jurgens 		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
2526c9901724SLeon Romanovsky 		goto err_sa;
25278f408ab6SDaniel Jurgens 	}
25288f408ab6SDaniel Jurgens 
25294e0f7b90SParav Pandit 	ret = register_pernet_device(&rdma_dev_net_ops);
25304e0f7b90SParav Pandit 	if (ret) {
25314e0f7b90SParav Pandit 		pr_warn("Couldn't init compat dev. ret %d\n", ret);
25324e0f7b90SParav Pandit 		goto err_compat;
25334e0f7b90SParav Pandit 	}
25344e0f7b90SParav Pandit 
25356c80b41aSLeon Romanovsky 	nldev_init();
2536c9901724SLeon Romanovsky 	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
25375ef8c0c1SJason Gunthorpe 	roce_gid_mgmt_init();
2538b2cbae2cSRoland Dreier 
2539fd75c789SNir Muchtar 	return 0;
2540fd75c789SNir Muchtar 
25414e0f7b90SParav Pandit err_compat:
25424e0f7b90SParav Pandit 	unregister_lsm_notifier(&ibdev_lsm_nb);
2543735c631aSMark Bloch err_sa:
2544735c631aSMark Bloch 	ib_sa_cleanup();
2545c2e49c92SMark Bloch err_mad:
2546c2e49c92SMark Bloch 	ib_mad_cleanup();
25474c2cb422SMark Bloch err_addr:
25484c2cb422SMark Bloch 	addr_cleanup();
2549e3f20f02SLeon Romanovsky err_ibnl:
2550c9901724SLeon Romanovsky 	rdma_nl_exit();
2551fd75c789SNir Muchtar err_sysfs:
255255aeed06SJason Gunthorpe 	class_unregister(&ib_class);
2553f794809aSJack Morgenstein err_comp_unbound:
2554f794809aSJack Morgenstein 	destroy_workqueue(ib_comp_unbound_wq);
255514d3a3b2SChristoph Hellwig err_comp:
255614d3a3b2SChristoph Hellwig 	destroy_workqueue(ib_comp_wq);
2557fd75c789SNir Muchtar err:
2558fd75c789SNir Muchtar 	destroy_workqueue(ib_wq);
25591da177e4SLinus Torvalds 	return ret;
25601da177e4SLinus Torvalds }
25611da177e4SLinus Torvalds 
25621da177e4SLinus Torvalds static void __exit ib_core_cleanup(void)
25631da177e4SLinus Torvalds {
25645ef8c0c1SJason Gunthorpe 	roce_gid_mgmt_cleanup();
25656c80b41aSLeon Romanovsky 	nldev_exit();
2566c9901724SLeon Romanovsky 	rdma_nl_unregister(RDMA_NL_LS);
25674e0f7b90SParav Pandit 	unregister_pernet_device(&rdma_dev_net_ops);
2568c9901724SLeon Romanovsky 	unregister_lsm_notifier(&ibdev_lsm_nb);
2569c2e49c92SMark Bloch 	ib_sa_cleanup();
25704c2cb422SMark Bloch 	ib_mad_cleanup();
2571e3f20f02SLeon Romanovsky 	addr_cleanup();
2572c9901724SLeon Romanovsky 	rdma_nl_exit();
257355aeed06SJason Gunthorpe 	class_unregister(&ib_class);
2574f794809aSJack Morgenstein 	destroy_workqueue(ib_comp_unbound_wq);
257514d3a3b2SChristoph Hellwig 	destroy_workqueue(ib_comp_wq);
2576f7c6a7b5SRoland Dreier 	/* Make sure that any pending umem accounting work is done. */
2577f0626710STejun Heo 	destroy_workqueue(ib_wq);
2578d0899892SJason Gunthorpe 	flush_workqueue(system_unbound_wq);
2579e59178d8SJason Gunthorpe 	WARN_ON(!xa_empty(&clients));
25800df91bb6SJason Gunthorpe 	WARN_ON(!xa_empty(&devices));
25811da177e4SLinus Torvalds }
25821da177e4SLinus Torvalds 
2583e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
2584e3bf14bdSJason Gunthorpe 
258562dfa795SParav Pandit /* ib core relies on netdev stack to first register net_ns_type_operations
258662dfa795SParav Pandit  * ns kobject type before ib_core initialization.
258762dfa795SParav Pandit  */
258862dfa795SParav Pandit fs_initcall(ib_core_init);
25891da177e4SLinus Torvalds module_exit(ib_core_cleanup);
2590