/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
#include "restrack.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */
/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 *
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1
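/*
 * Illustrative sketch (not upstream code): a reader that only wants
 * registered devices takes the rwsem and iterates on the mark, e.g.
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED)
 *		use(device);
 *	up_read(&devices_rwsem);
 *
 * where use() is a placeholder. Holding devices_rwsem across the walk is
 * what guarantees the mark, and therefore the registration state, cannot
 * change underneath the caller.
 */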
/**
 * struct rdma_dev_net - rdma net namespace metadata for a net
 * @net: Pointer to owner net namespace
 * @id: xarray id to identify the net namespace.
 */
struct rdma_dev_net {
	possible_net_t net;
	u32 id;
};

static unsigned int rdma_dev_net_id;

/*
 * A list of net namespaces is maintained in an xarray. This is necessary
 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
 */
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
 * rwsem to protect accessing the rdma_nets xarray entries.
 */
static DECLARE_RWSEM(rdma_nets_rwsem);

static bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
		 "Share device among net namespaces; default=1 (shared)");
/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays. So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)			\
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);	\
	     !xa_is_err(entry);						\
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
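/*
 * Illustrative sketch (not upstream code): unlike xa_for_each_marked(), the
 * iterator above also yields entries whose stored value is NULL, so a caller
 * can see client slots that were allocated but never populated, e.g.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xan_for_each_marked (&device->client_data, index, entry,
 *			     CLIENT_DATA_REGISTERED)
 *		handle(index, entry);
 *
 * where handle() is a placeholder and entry may legitimately be NULL.
 */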
/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}

	return 0;
}

/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns valid device pointer.
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference is to be released
 *
 * ib_device_put() releases reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
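/*
 * Illustrative sketch (not upstream code): lookups by index pair with
 * ib_device_put() once the caller is done with the device, e.g.
 *
 *	struct ib_device *dev = ib_device_get_by_index(index);
 *
 *	if (dev) {
 *		do_work(dev);
 *		ib_device_put(dev);
 *	}
 *
 * where do_work() is a placeholder. The final put of an unregistering device
 * completes unreg_completion, which disable_device() below waits on.
 */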
static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

/**
 * ib_device_get_by_name - Find an IB device by name
 * @name: The name to look for
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device by its name. The caller must call
 * ib_device_put() on the returned pointer.
 */
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = __ib_device_get_by_name(name);
	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
	    device->driver_id != driver_id)
		device = NULL;

	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}
EXPORT_SYMBOL(ib_device_get_by_name);

static int rename_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;
	int ret = 0;

	mutex_lock(&device->compat_devs_mutex);
	xa_for_each (&device->compat_devs, index, cdev) {
		ret = device_rename(&cdev->dev, dev_name(&device->dev));
		if (ret) {
			dev_warn(&cdev->dev,
				 "Fail to rename compatdev to new name %s\n",
				 dev_name(&device->dev));
			break;
		}
	}
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		ret = 0;
		goto out;
	}

	if (__ib_device_get_by_name(name)) {
		ret = -EEXIST;
		goto out;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret)
		goto out;
	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
	ret = rename_compat_devs(ibdev);
out:
	up_write(&devices_rwsem);
	return ret;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_exclusive(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}
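/*
 * Illustrative note: alloc_name() is only used for printf-style names such
 * as "mlx5_%d" (see assign_name() below); the pattern is an example, not a
 * fixed string. Every existing device that matches the pattern claims its
 * number in the temporary IDA, and ida_alloc() then hands back the lowest
 * unused number, so registering three such devices yields mlx5_0, mlx5_1
 * and mlx5_2.
 */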
static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	free_netdevs(dev);
	WARN_ON(refcount_read(&dev->refcount));
	ib_cache_release_one(dev);
	ib_security_release_port_pkey_list(dev);
	xa_destroy(&dev->compat_devs);
	xa_destroy(&dev->client_data);
	if (dev->port_data)
		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
				       pdata[0]),
			  rcu_head);
	kfree_rcu(dev, rcu_head);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static const void *net_namespace(struct device *d)
{
	struct ib_core_device *coredev =
			container_of(d, struct ib_core_device, dev);

	return read_pnet(&coredev->rdma_net);
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

static void rdma_init_coredev(struct ib_core_device *coredev,
			      struct ib_device *dev, struct net *net)
{
	/* This BUILD_BUG_ON is intended to catch layout change
	 * of union of ib_core_device and device.
	 * dev must be the first element as ib_core and provider
	 * drivers use it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
		     offsetof(struct ib_device, dev));

	coredev->dev.class = &ib_class;
	coredev->dev.groups = dev->groups;
	device_initialize(&coredev->dev);
	coredev->owner = dev;
	INIT_LIST_HEAD(&coredev->port_list);
	write_pnet(&coredev->rdma_net, net);
}
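/*
 * Descriptive note: rdma_init_coredev() is used twice in this file - once
 * from _ib_alloc_device() for the primary device (bound to &init_net), and
 * once from add_one_compat_dev() below for the per-namespace compat devices
 * that mirror the primary device into other net namespaces.
 */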
/**
 * _ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	device->groups[0] = &ib_dev_attr_group;
	rdma_init_coredev(&device->coredev, device, &init_net);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be alloc because we don't want our mark to be
	 * destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
	mutex_init(&device->compat_devs_mutex);
	init_completion(&device->unreg_completion);
	INIT_WORK(&device->unregistration_work, ib_unregister_work);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
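/*
 * Illustrative sketch (not part of this file): drivers do not call
 * _ib_alloc_device() directly. They use the ib_alloc_device(drv_struct,
 * member) wrapper from <rdma/ib_verbs.h>, which sizes the allocation for the
 * driver's container structure and returns the container, e.g.
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *
 * where "mydrv_dev" and "ibdev" are hypothetical driver names.
 */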
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->ops.dealloc_driver)
		device->ops.dealloc_driver(device);

	/*
	 * ib_unregister_driver() requires all devices to remain in the xarray
	 * while their ops are callable. The last op we call is dealloc_driver
	 * above. This is needed to create a fence on op callbacks prior to
	 * allowing the driver module to unload.
	 */
	down_write(&devices_rwsem);
	if (xa_load(&devices, device->index) == device)
		xa_erase(&devices, device->index);
	up_write(&devices_rwsem);

	/* Expedite releasing netdev references */
	free_netdevs(device);

	WARN_ON(!xa_empty(&device->compat_devs));
	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence, any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
			CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add)
		client->add(device);

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	up_write(&device->client_data_rwsem);
	return ret;
}
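/*
 * Descriptive note: the client's ->add() callback typically stores its
 * per-device state with ib_set_client_data() (defined later in this file);
 * that same pointer is what remove_client_context() passes back to the
 * client's ->remove() callback as client_data.
 */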
static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	downgrade_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is
	 * required to ensure that unregister_client does not return until all
	 * clients are completely unregistered, which is required to avoid
	 * module unloading races.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	up_read(&device->client_data_rwsem);
}
static int alloc_port_data(struct ib_device *device)
{
	struct ib_port_data_rcu *pdata_rcu;
	unsigned int port;

	if (device->port_data)
		return 0;

	/* This can only be called once the physical port range is defined */
	if (WARN_ON(!device->phys_port_cnt))
		return -EINVAL;

	/*
	 * device->port_data is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_data is declared as a 1 based array with potential
	 * empty slots at the beginning.
	 */
	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
					rdma_end_port(device) + 1),
			    GFP_KERNEL);
	if (!pdata_rcu)
		return -ENOMEM;
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * kfree_rcu.
	 */
	device->port_data = pdata_rcu->pdata;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		pdata->ib_dev = device;
		spin_lock_init(&pdata->pkey_list_lock);
		INIT_LIST_HEAD(&pdata->pkey_list);
		spin_lock_init(&pdata->netdev_lock);
		INIT_HLIST_NODE(&pdata->ndev_hash_link);
	}
	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int setup_port_data(struct ib_device *device)
{
	unsigned int port;
	int ret;

	ret = alloc_port_data(device);
	if (ret)
		return ret;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		ret = device->ops.get_port_immutable(device, port,
						     &pdata->immutable);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}
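/*
 * Descriptive note: ports are numbered from rdma_start_port() (0 for switch
 * devices, 1 otherwise) through rdma_end_port(), which is why the array
 * above is sized rdma_end_port(device) + 1 and slot 0 is normally unused.
 */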
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_OK;
}
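/*
 * Descriptive note: ibdev_lsm_nb (registered with the LSM notifier chain
 * when ib_core initialises) funnels LSM_POLICY_CHANGE events into
 * ib_security_change(), which defers the heavier walk of all registered
 * devices to ib_policy_change_task() via ib_policy_change_work.
 */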
static void compatdev_release(struct device *dev)
{
	struct ib_core_device *cdev =
		container_of(dev, struct ib_core_device, dev);

	kfree(cdev);
}

static int add_one_compat_dev(struct ib_device *device,
			      struct rdma_dev_net *rnet)
{
	struct ib_core_device *cdev;
	int ret;

	if (!ib_devices_shared_netns)
		return 0;

	/*
	 * Create and add compat device in all namespaces other than where it
	 * is currently bound to.
	 */
	if (net_eq(read_pnet(&rnet->net),
		   read_pnet(&device->coredev.rdma_net)))
		return 0;

	/*
	 * The first of init_net() or ib_register_device() to take the
	 * compat_devs_mutex wins and gets to add the device. Others will wait
	 * for completion here.
	 */
	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_load(&device->compat_devs, rnet->id);
	if (cdev) {
		ret = 0;
		goto done;
	}
	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
	if (ret)
		goto done;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto cdev_err;
	}

	cdev->dev.parent = device->dev.parent;
	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
	cdev->dev.release = compatdev_release;
	dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));

	ret = device_add(&cdev->dev);
	if (ret)
		goto add_err;
	ret = ib_setup_port_attrs(cdev, false);
	if (ret)
		goto port_err;

	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
			      cdev, GFP_KERNEL));
	if (ret)
		goto insert_err;

	mutex_unlock(&device->compat_devs_mutex);
	return 0;

insert_err:
	ib_free_port_attrs(cdev);
port_err:
	device_del(&cdev->dev);
add_err:
	put_device(&cdev->dev);
cdev_err:
	xa_release(&device->compat_devs, rnet->id);
done:
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}
static void remove_one_compat_dev(struct ib_device *device, u32 id)
{
	struct ib_core_device *cdev;

	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_erase(&device->compat_devs, id);
	mutex_unlock(&device->compat_devs_mutex);
	if (cdev) {
		ib_free_port_attrs(cdev);
		device_del(&cdev->dev);
		put_device(&cdev->dev);
	}
}

static void remove_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;

	xa_for_each (&device->compat_devs, index, cdev)
		remove_one_compat_dev(device, index);
}

static int add_compat_devs(struct ib_device *device)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	down_read(&rdma_nets_rwsem);
	xa_for_each (&rdma_nets, index, rnet) {
		ret = add_one_compat_dev(device, rnet);
		if (ret)
			break;
	}
	up_read(&rdma_nets_rwsem);
	return ret;
}

static void rdma_dev_exit_net(struct net *net)
{
	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
	struct ib_device *dev;
	unsigned long index;
	int ret;

	down_write(&rdma_nets_rwsem);
	/*
	 * Prevent the ID from being re-used and hide the id from xa_for_each.
	 */
	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
	WARN_ON(ret);
	up_write(&rdma_nets_rwsem);

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		get_device(&dev->dev);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del() doesn't hold the devices_rwsem for too long.
		 */
		up_read(&devices_rwsem);

		remove_one_compat_dev(dev, rnet->id);

		put_device(&dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);

	xa_erase(&rdma_nets, rnet->id);
}
static __net_init int rdma_dev_init_net(struct net *net)
{
	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
	unsigned long index;
	struct ib_device *dev;
	int ret;

	/* No need to create any compat devices in default init_net. */
	if (net_eq(net, &init_net))
		return 0;

	write_pnet(&rnet->net, net);

	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		ret = add_one_compat_dev(dev, rnet);
		if (ret)
			break;
	}
	up_read(&devices_rwsem);

	if (ret)
		rdma_dev_exit_net(net);

	return ret;
}

/*
 * Assign the unique string device name and the unique device index. This is
 * undone by ib_dealloc_device.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
			&last_id, GFP_KERNEL);
	if (ret > 0)
		ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}
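/*
 * Descriptive note: a caller-supplied name may either be fixed ("rxe0") or a
 * printf pattern ("mlx5_%d") resolved by alloc_name() above; both example
 * names here are illustrative. The index handed out by xa_alloc_cyclic() is
 * the same index later looked up by ib_device_get_by_index(), e.g. from the
 * rdma netlink (nldev) interface.
 */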
static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);

	ret = ib_device_check_mandatory(device);
	if (ret)
		return ret;

	ret = setup_port_data(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per-port data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	return 0;
}
static void disable_device(struct ib_device *device)
{
	struct ib_client *client;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	down_read(&clients_rwsem);
	list_for_each_entry_reverse(client, &client_list, list)
		remove_client_context(device, client->client_id);
	up_read(&clients_rwsem);

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	/*
	 * compat devices must be removed after device refcount drops to zero.
	 * Otherwise init_net() may add more compatdevs after removing compat
	 * devices and before device is disabled.
	 */
	remove_compat_devs(device);

	/* Expedite removing unregistered pointers from the hash table */
	free_netdevs(device);
}
/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it fails.
 */
static int enable_device_and_get(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	/*
	 * One ref belongs to the xa and the other belongs to this
	 * thread. This is needed to guard against parallel unregistration.
	 */
	refcount_set(&device->refcount, 2);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);

	/*
	 * By using downgrade_write() we ensure that no other thread can clear
	 * DEVICE_REGISTERED while we are completing the client setup.
	 */
	downgrade_write(&devices_rwsem);

	if (device->ops.enable_driver) {
		ret = device->ops.enable_driver(device);
		if (ret)
			goto out;
	}

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret)
			break;
	}
	up_read(&clients_rwsem);
	if (!ret)
		ret = add_compat_devs(device);
out:
	up_read(&devices_rwsem);
	return ret;
}
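/*
 * Descriptive note: the refcount_set(.., 2) above is what the rest of the
 * file balances against - disable_device() drops the xarray's reference and
 * then waits on unreg_completion, which only completes once every
 * ib_device_get_by_index()/ib_device_try_get() user has done its matching
 * ib_device_put().
 */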
1101548cb4fbSParav Pandit */ 1102ea4baf7fSParav Pandit int ib_register_device(struct ib_device *device, const char *name) 1103548cb4fbSParav Pandit { 1104548cb4fbSParav Pandit int ret; 11051da177e4SLinus Torvalds 11060df91bb6SJason Gunthorpe ret = assign_name(device, name); 1107e349f858SJason Gunthorpe if (ret) 1108921eab11SJason Gunthorpe return ret; 11091da177e4SLinus Torvalds 1110548cb4fbSParav Pandit ret = setup_device(device); 1111548cb4fbSParav Pandit if (ret) 1112d0899892SJason Gunthorpe return ret; 111303db3a2dSMatan Barak 1114d45f89d5SJason Gunthorpe ret = ib_cache_setup_one(device); 1115d45f89d5SJason Gunthorpe if (ret) { 1116d45f89d5SJason Gunthorpe dev_warn(&device->dev, 1117d45f89d5SJason Gunthorpe "Couldn't set up InfiniBand P_Key/GID cache\n"); 1118d0899892SJason Gunthorpe return ret; 1119d45f89d5SJason Gunthorpe } 1120d45f89d5SJason Gunthorpe 11217527a7b1SParav Pandit ib_device_register_rdmacg(device); 11223e153a93SIra Weiny 11235f8f5499SParav Pandit ret = device_add(&device->dev); 11245f8f5499SParav Pandit if (ret) 11255f8f5499SParav Pandit goto cg_cleanup; 11265f8f5499SParav Pandit 1127ea4baf7fSParav Pandit ret = ib_device_register_sysfs(device); 11281da177e4SLinus Torvalds if (ret) { 112943c7c851SJason Gunthorpe dev_warn(&device->dev, 113043c7c851SJason Gunthorpe "Couldn't register device with driver model\n"); 11315f8f5499SParav Pandit goto dev_cleanup; 11321da177e4SLinus Torvalds } 11331da177e4SLinus Torvalds 1134d0899892SJason Gunthorpe ret = enable_device_and_get(device); 1135d0899892SJason Gunthorpe if (ret) { 1136d0899892SJason Gunthorpe void (*dealloc_fn)(struct ib_device *); 1137d0899892SJason Gunthorpe 1138d0899892SJason Gunthorpe /* 1139d0899892SJason Gunthorpe * If we hit this error flow then we don't want to 1140d0899892SJason Gunthorpe * automatically dealloc the device since the caller is 1141d0899892SJason Gunthorpe * expected to call ib_dealloc_device() after 1142d0899892SJason Gunthorpe * ib_register_device() fails. This is tricky due to the 1143d0899892SJason Gunthorpe * possibility for a parallel unregistration along with this 1144d0899892SJason Gunthorpe * error flow. Since we have a refcount here we know any 1145d0899892SJason Gunthorpe * parallel flow is stopped in disable_device and will see the 1146d0899892SJason Gunthorpe * NULL pointers, causing the responsibility to 1147d0899892SJason Gunthorpe * ib_dealloc_device() to revert back to this thread. 1148d0899892SJason Gunthorpe */ 1149d0899892SJason Gunthorpe dealloc_fn = device->ops.dealloc_driver; 1150d0899892SJason Gunthorpe device->ops.dealloc_driver = NULL; 1151d0899892SJason Gunthorpe ib_device_put(device); 1152d0899892SJason Gunthorpe __ib_unregister_device(device); 1153d0899892SJason Gunthorpe device->ops.dealloc_driver = dealloc_fn; 1154d0899892SJason Gunthorpe return ret; 1155d0899892SJason Gunthorpe } 1156d0899892SJason Gunthorpe ib_device_put(device); 11571da177e4SLinus Torvalds 11584be3a4faSParav Pandit return 0; 11594be3a4faSParav Pandit 11605f8f5499SParav Pandit dev_cleanup: 11615f8f5499SParav Pandit device_del(&device->dev); 11622fb4f4eaSParav Pandit cg_cleanup: 11632fb4f4eaSParav Pandit ib_device_unregister_rdmacg(device); 1164d45f89d5SJason Gunthorpe ib_cache_cleanup_one(device); 11651da177e4SLinus Torvalds return ret; 11661da177e4SLinus Torvalds } 11671da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_device); 11681da177e4SLinus Torvalds 1169d0899892SJason Gunthorpe /* Callers must hold a get on the device. 
 */
1170d0899892SJason Gunthorpe static void __ib_unregister_device(struct ib_device *ib_dev)
1171d0899892SJason Gunthorpe {
1172d0899892SJason Gunthorpe 	/*
1173d0899892SJason Gunthorpe 	 * We have a registration lock so that all the calls to unregister are
1174d0899892SJason Gunthorpe 	 * fully fenced, once any unregister returns the device is truly
1175d0899892SJason Gunthorpe 	 * unregistered even if multiple callers are unregistering it at the
1176d0899892SJason Gunthorpe 	 * same time. This also interacts with the registration flow and
1177d0899892SJason Gunthorpe 	 * provides sane semantics if register and unregister are racing.
1178d0899892SJason Gunthorpe 	 */
1179d0899892SJason Gunthorpe 	mutex_lock(&ib_dev->unregistration_lock);
1180d0899892SJason Gunthorpe 	if (!refcount_read(&ib_dev->refcount))
1181d0899892SJason Gunthorpe 		goto out;
1182d0899892SJason Gunthorpe 
1183d0899892SJason Gunthorpe 	disable_device(ib_dev);
1184d0899892SJason Gunthorpe 	ib_device_unregister_sysfs(ib_dev);
1185d0899892SJason Gunthorpe 	device_del(&ib_dev->dev);
1186d0899892SJason Gunthorpe 	ib_device_unregister_rdmacg(ib_dev);
1187d0899892SJason Gunthorpe 	ib_cache_cleanup_one(ib_dev);
1188d0899892SJason Gunthorpe 
1189d0899892SJason Gunthorpe 	/*
1190d0899892SJason Gunthorpe 	 * Drivers using the new flow may not call ib_dealloc_device except
1191d0899892SJason Gunthorpe 	 * in error unwind prior to registration success.
1192d0899892SJason Gunthorpe 	 */
1193d0899892SJason Gunthorpe 	if (ib_dev->ops.dealloc_driver) {
1194d0899892SJason Gunthorpe 		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1195d0899892SJason Gunthorpe 		ib_dealloc_device(ib_dev);
1196d0899892SJason Gunthorpe 	}
1197d0899892SJason Gunthorpe out:
1198d0899892SJason Gunthorpe 	mutex_unlock(&ib_dev->unregistration_lock);
1199d0899892SJason Gunthorpe }
1200d0899892SJason Gunthorpe 
12011da177e4SLinus Torvalds /**
12021da177e4SLinus Torvalds  * ib_unregister_device - Unregister an IB device
1203d0899892SJason Gunthorpe  * @ib_dev: The device to unregister
12041da177e4SLinus Torvalds  *
12051da177e4SLinus Torvalds  * Unregister an IB device. All clients will receive a remove callback.
1206d0899892SJason Gunthorpe  *
1207d0899892SJason Gunthorpe  * Callers should call this routine only once, and protect against races with
1208d0899892SJason Gunthorpe  * registration. Typically it should only be called as part of a remove
1209d0899892SJason Gunthorpe  * callback in an implementation of driver core's struct device_driver and
1210d0899892SJason Gunthorpe  * related.
1211d0899892SJason Gunthorpe  *
1212d0899892SJason Gunthorpe  * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1213d0899892SJason Gunthorpe  * this function.
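 *
 * For a driver that does not provide ops.dealloc_driver, an illustrative
 * teardown sequence is (the "priv" wrapper struct is an assumption of the
 * sketch, not defined here):
 *
 *	ib_unregister_device(&priv->ibdev);
 *	ib_dealloc_device(&priv->ibdev);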
12141da177e4SLinus Torvalds  */
1215d0899892SJason Gunthorpe void ib_unregister_device(struct ib_device *ib_dev)
12161da177e4SLinus Torvalds {
1217d0899892SJason Gunthorpe 	get_device(&ib_dev->dev);
1218d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1219d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
12201da177e4SLinus Torvalds }
12211da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_device);
12221da177e4SLinus Torvalds 
1223d0899892SJason Gunthorpe /**
1224d0899892SJason Gunthorpe  * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1225d0899892SJason Gunthorpe  * @ib_dev: The device to unregister
1226d0899892SJason Gunthorpe  *
1227d0899892SJason Gunthorpe  * This is the same as ib_unregister_device(), except it includes an internal
1228d0899892SJason Gunthorpe  * ib_device_put() that should match a 'get' obtained by the caller.
1229d0899892SJason Gunthorpe  *
1230d0899892SJason Gunthorpe  * It is safe to call this routine concurrently from multiple threads while
1231d0899892SJason Gunthorpe  * holding the 'get'. When the function returns the device is fully
1232d0899892SJason Gunthorpe  * unregistered.
1233d0899892SJason Gunthorpe  *
1234d0899892SJason Gunthorpe  * Drivers using this flow MUST use the ops.dealloc_driver callback to clean
1235d0899892SJason Gunthorpe  * up their resources associated with the device and dealloc it.
1236d0899892SJason Gunthorpe  */
1237d0899892SJason Gunthorpe void ib_unregister_device_and_put(struct ib_device *ib_dev)
1238d0899892SJason Gunthorpe {
1239d0899892SJason Gunthorpe 	WARN_ON(!ib_dev->ops.dealloc_driver);
1240d0899892SJason Gunthorpe 	get_device(&ib_dev->dev);
1241d0899892SJason Gunthorpe 	ib_device_put(ib_dev);
1242d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1243d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
1244d0899892SJason Gunthorpe }
1245d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_and_put);
1246d0899892SJason Gunthorpe 
1247d0899892SJason Gunthorpe /**
1248d0899892SJason Gunthorpe  * ib_unregister_driver - Unregister all IB devices for a driver
1249d0899892SJason Gunthorpe  * @driver_id: The driver to unregister
1250d0899892SJason Gunthorpe  *
1251d0899892SJason Gunthorpe  * This implements a fence for device unregistration. It only returns once all
1252d0899892SJason Gunthorpe  * devices associated with the driver_id have fully completed their
1253d0899892SJason Gunthorpe  * unregistration and returned from ib_unregister_device*().
1254d0899892SJason Gunthorpe  *
1255d0899892SJason Gunthorpe  * If devices are not yet unregistered it goes ahead and starts unregistering
1256d0899892SJason Gunthorpe  * them.
1257d0899892SJason Gunthorpe  *
1258d0899892SJason Gunthorpe  * This does not block creation of new devices with the given driver_id, that
1259d0899892SJason Gunthorpe  * is the responsibility of the caller.
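 *
 * A typical caller is a driver's module exit path, once it has stopped
 * creating new devices (the driver id and function name below are
 * placeholders, not defined here):
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_MY_DRIVER);
 *	}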
1260d0899892SJason Gunthorpe  */
1261d0899892SJason Gunthorpe void ib_unregister_driver(enum rdma_driver_id driver_id)
1262d0899892SJason Gunthorpe {
1263d0899892SJason Gunthorpe 	struct ib_device *ib_dev;
1264d0899892SJason Gunthorpe 	unsigned long index;
1265d0899892SJason Gunthorpe 
1266d0899892SJason Gunthorpe 	down_read(&devices_rwsem);
1267d0899892SJason Gunthorpe 	xa_for_each (&devices, index, ib_dev) {
1268d0899892SJason Gunthorpe 		if (ib_dev->driver_id != driver_id)
1269d0899892SJason Gunthorpe 			continue;
1270d0899892SJason Gunthorpe 
1271d0899892SJason Gunthorpe 		get_device(&ib_dev->dev);
1272d0899892SJason Gunthorpe 		up_read(&devices_rwsem);
1273d0899892SJason Gunthorpe 
1274d0899892SJason Gunthorpe 		WARN_ON(!ib_dev->ops.dealloc_driver);
1275d0899892SJason Gunthorpe 		__ib_unregister_device(ib_dev);
1276d0899892SJason Gunthorpe 
1277d0899892SJason Gunthorpe 		put_device(&ib_dev->dev);
1278d0899892SJason Gunthorpe 		down_read(&devices_rwsem);
1279d0899892SJason Gunthorpe 	}
1280d0899892SJason Gunthorpe 	up_read(&devices_rwsem);
1281d0899892SJason Gunthorpe }
1282d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_driver);
1283d0899892SJason Gunthorpe 
1284d0899892SJason Gunthorpe static void ib_unregister_work(struct work_struct *work)
1285d0899892SJason Gunthorpe {
1286d0899892SJason Gunthorpe 	struct ib_device *ib_dev =
1287d0899892SJason Gunthorpe 		container_of(work, struct ib_device, unregistration_work);
1288d0899892SJason Gunthorpe 
1289d0899892SJason Gunthorpe 	__ib_unregister_device(ib_dev);
1290d0899892SJason Gunthorpe 	put_device(&ib_dev->dev);
1291d0899892SJason Gunthorpe }
1292d0899892SJason Gunthorpe 
1293d0899892SJason Gunthorpe /**
1294d0899892SJason Gunthorpe  * ib_unregister_device_queued - Unregister a device using a work queue
1295d0899892SJason Gunthorpe  * @ib_dev: The device to unregister
1296d0899892SJason Gunthorpe  *
1297d0899892SJason Gunthorpe  * This schedules an asynchronous unregistration using a WQ for the device. A
1298d0899892SJason Gunthorpe  * driver should use this to avoid holding locks while doing unregistration,
1299d0899892SJason Gunthorpe  * such as holding the RTNL lock.
1300d0899892SJason Gunthorpe  *
1301d0899892SJason Gunthorpe  * Drivers using this API must use ib_unregister_driver before module unload
1302d0899892SJason Gunthorpe  * to ensure that all scheduled unregistrations have completed.
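 *
 * A hedged sketch of the intended pattern (driver-private names are
 * assumptions): from a context that must not block, e.g. while holding RTNL:
 *
 *	ib_unregister_device_queued(&priv->ibdev);
 *
 * and later, in the driver's module exit, fence all queued work:
 *
 *	ib_unregister_driver(RDMA_DRIVER_MY_DRIVER);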
1303d0899892SJason Gunthorpe */ 1304d0899892SJason Gunthorpe void ib_unregister_device_queued(struct ib_device *ib_dev) 1305d0899892SJason Gunthorpe { 1306d0899892SJason Gunthorpe WARN_ON(!refcount_read(&ib_dev->refcount)); 1307d0899892SJason Gunthorpe WARN_ON(!ib_dev->ops.dealloc_driver); 1308d0899892SJason Gunthorpe get_device(&ib_dev->dev); 1309d0899892SJason Gunthorpe if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work)) 1310d0899892SJason Gunthorpe put_device(&ib_dev->dev); 1311d0899892SJason Gunthorpe } 1312d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_queued); 1313d0899892SJason Gunthorpe 13144e0f7b90SParav Pandit static struct pernet_operations rdma_dev_net_ops = { 13154e0f7b90SParav Pandit .init = rdma_dev_init_net, 13164e0f7b90SParav Pandit .exit = rdma_dev_exit_net, 13174e0f7b90SParav Pandit .id = &rdma_dev_net_id, 13184e0f7b90SParav Pandit .size = sizeof(struct rdma_dev_net), 13194e0f7b90SParav Pandit }; 13204e0f7b90SParav Pandit 1321e59178d8SJason Gunthorpe static int assign_client_id(struct ib_client *client) 1322e59178d8SJason Gunthorpe { 1323e59178d8SJason Gunthorpe int ret; 1324e59178d8SJason Gunthorpe 1325921eab11SJason Gunthorpe down_write(&clients_rwsem); 1326e59178d8SJason Gunthorpe /* 1327e59178d8SJason Gunthorpe * The add/remove callbacks must be called in FIFO/LIFO order. To 1328e59178d8SJason Gunthorpe * achieve this we assign client_ids so they are sorted in 1329e59178d8SJason Gunthorpe * registration order, and retain a linked list we can reverse iterate 1330e59178d8SJason Gunthorpe * to get the LIFO order. The extra linked list can go away if xarray 1331e59178d8SJason Gunthorpe * learns to reverse iterate. 1332e59178d8SJason Gunthorpe */ 1333ea295481SLinus Torvalds if (list_empty(&client_list)) { 1334e59178d8SJason Gunthorpe client->client_id = 0; 1335ea295481SLinus Torvalds } else { 1336ea295481SLinus Torvalds struct ib_client *last; 1337ea295481SLinus Torvalds 1338ea295481SLinus Torvalds last = list_last_entry(&client_list, struct ib_client, list); 1339ea295481SLinus Torvalds client->client_id = last->client_id + 1; 1340ea295481SLinus Torvalds } 1341ea295481SLinus Torvalds ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); 1342e59178d8SJason Gunthorpe if (ret) 1343e59178d8SJason Gunthorpe goto out; 1344e59178d8SJason Gunthorpe 1345921eab11SJason Gunthorpe xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); 1346921eab11SJason Gunthorpe list_add_tail(&client->list, &client_list); 1347921eab11SJason Gunthorpe 1348e59178d8SJason Gunthorpe out: 1349921eab11SJason Gunthorpe up_write(&clients_rwsem); 1350e59178d8SJason Gunthorpe return ret; 1351e59178d8SJason Gunthorpe } 1352e59178d8SJason Gunthorpe 13531da177e4SLinus Torvalds /** 13541da177e4SLinus Torvalds * ib_register_client - Register an IB client 13551da177e4SLinus Torvalds * @client:Client to register 13561da177e4SLinus Torvalds * 13571da177e4SLinus Torvalds * Upper level users of the IB drivers can use ib_register_client() to 13581da177e4SLinus Torvalds * register callbacks for IB device addition and removal. When an IB 13591da177e4SLinus Torvalds * device is added, each registered client's add method will be called 13601da177e4SLinus Torvalds * (in the order the clients were registered), and when a device is 13611da177e4SLinus Torvalds * removed, each client's remove method will be called (in the reverse 13621da177e4SLinus Torvalds * order that clients were registered). 
In addition, when 13631da177e4SLinus Torvalds * ib_register_client() is called, the client will receive an add 13641da177e4SLinus Torvalds * callback for all devices already registered. 13651da177e4SLinus Torvalds */ 13661da177e4SLinus Torvalds int ib_register_client(struct ib_client *client) 13671da177e4SLinus Torvalds { 13681da177e4SLinus Torvalds struct ib_device *device; 13690df91bb6SJason Gunthorpe unsigned long index; 1370e59178d8SJason Gunthorpe int ret; 13711da177e4SLinus Torvalds 1372e59178d8SJason Gunthorpe ret = assign_client_id(client); 1373921eab11SJason Gunthorpe if (ret) 1374921eab11SJason Gunthorpe return ret; 1375921eab11SJason Gunthorpe 1376921eab11SJason Gunthorpe down_read(&devices_rwsem); 1377921eab11SJason Gunthorpe xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { 1378921eab11SJason Gunthorpe ret = add_client_context(device, client); 1379e59178d8SJason Gunthorpe if (ret) { 1380921eab11SJason Gunthorpe up_read(&devices_rwsem); 1381921eab11SJason Gunthorpe ib_unregister_client(client); 1382e59178d8SJason Gunthorpe return ret; 1383e59178d8SJason Gunthorpe } 1384921eab11SJason Gunthorpe } 1385921eab11SJason Gunthorpe up_read(&devices_rwsem); 13861da177e4SLinus Torvalds return 0; 13871da177e4SLinus Torvalds } 13881da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_client); 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds /** 13911da177e4SLinus Torvalds * ib_unregister_client - Unregister an IB client 13921da177e4SLinus Torvalds * @client:Client to unregister 13931da177e4SLinus Torvalds * 13941da177e4SLinus Torvalds * Upper level users use ib_unregister_client() to remove their client 13951da177e4SLinus Torvalds * registration. When ib_unregister_client() is called, the client 13961da177e4SLinus Torvalds * will receive a remove callback for each IB device still registered. 1397921eab11SJason Gunthorpe * 1398921eab11SJason Gunthorpe * This is a full fence, once it returns no client callbacks will be called, 1399921eab11SJason Gunthorpe * or are running in another thread. 14001da177e4SLinus Torvalds */ 14011da177e4SLinus Torvalds void ib_unregister_client(struct ib_client *client) 14021da177e4SLinus Torvalds { 14031da177e4SLinus Torvalds struct ib_device *device; 14040df91bb6SJason Gunthorpe unsigned long index; 14051da177e4SLinus Torvalds 1406921eab11SJason Gunthorpe down_write(&clients_rwsem); 1407e59178d8SJason Gunthorpe xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); 1408921eab11SJason Gunthorpe up_write(&clients_rwsem); 1409921eab11SJason Gunthorpe /* 1410921eab11SJason Gunthorpe * Every device still known must be serialized to make sure we are 1411921eab11SJason Gunthorpe * done with the client callbacks before we return. 
1412921eab11SJason Gunthorpe */ 1413921eab11SJason Gunthorpe down_read(&devices_rwsem); 1414921eab11SJason Gunthorpe xa_for_each (&devices, index, device) 1415921eab11SJason Gunthorpe remove_client_context(device, client->client_id); 1416921eab11SJason Gunthorpe up_read(&devices_rwsem); 14175aa44bb9SHaggai Eran 1418921eab11SJason Gunthorpe down_write(&clients_rwsem); 1419e59178d8SJason Gunthorpe list_del(&client->list); 1420e59178d8SJason Gunthorpe xa_erase(&clients, client->client_id); 1421921eab11SJason Gunthorpe up_write(&clients_rwsem); 14221da177e4SLinus Torvalds } 14231da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_client); 14241da177e4SLinus Torvalds 14251da177e4SLinus Torvalds /** 14269cd330d3SKrishna Kumar * ib_set_client_data - Set IB client context 14271da177e4SLinus Torvalds * @device:Device to set context for 14281da177e4SLinus Torvalds * @client:Client to set context for 14291da177e4SLinus Torvalds * @data:Context to set 14301da177e4SLinus Torvalds * 14310df91bb6SJason Gunthorpe * ib_set_client_data() sets client context data that can be retrieved with 14320df91bb6SJason Gunthorpe * ib_get_client_data(). This can only be called while the client is 14330df91bb6SJason Gunthorpe * registered to the device, once the ib_client remove() callback returns this 14340df91bb6SJason Gunthorpe * cannot be called. 14351da177e4SLinus Torvalds */ 14361da177e4SLinus Torvalds void ib_set_client_data(struct ib_device *device, struct ib_client *client, 14371da177e4SLinus Torvalds void *data) 14381da177e4SLinus Torvalds { 14390df91bb6SJason Gunthorpe void *rc; 14401da177e4SLinus Torvalds 14410df91bb6SJason Gunthorpe if (WARN_ON(IS_ERR(data))) 14420df91bb6SJason Gunthorpe data = NULL; 14431da177e4SLinus Torvalds 14440df91bb6SJason Gunthorpe rc = xa_store(&device->client_data, client->client_id, data, 14450df91bb6SJason Gunthorpe GFP_KERNEL); 14460df91bb6SJason Gunthorpe WARN_ON(xa_is_err(rc)); 14471da177e4SLinus Torvalds } 14481da177e4SLinus Torvalds EXPORT_SYMBOL(ib_set_client_data); 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds /** 14511da177e4SLinus Torvalds * ib_register_event_handler - Register an IB event handler 14521da177e4SLinus Torvalds * @event_handler:Handler to register 14531da177e4SLinus Torvalds * 14541da177e4SLinus Torvalds * ib_register_event_handler() registers an event handler that will be 14551da177e4SLinus Torvalds * called back when asynchronous IB events occur (as defined in 14561da177e4SLinus Torvalds * chapter 11 of the InfiniBand Architecture Specification). This 14571da177e4SLinus Torvalds * callback may occur in interrupt context. 
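 *
 * A short sketch (priv, my_ib_dev and my_event_handler are placeholders; the
 * INIT_IB_EVENT_HANDLER() initializer is assumed from <rdma/ib_verbs.h>):
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, my_ib_dev, my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 *
 * Since the handler may be invoked in interrupt context it must not sleep.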
14581da177e4SLinus Torvalds */ 1459dcc9881eSLeon Romanovsky void ib_register_event_handler(struct ib_event_handler *event_handler) 14601da177e4SLinus Torvalds { 14611da177e4SLinus Torvalds unsigned long flags; 14621da177e4SLinus Torvalds 14631da177e4SLinus Torvalds spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 14641da177e4SLinus Torvalds list_add_tail(&event_handler->list, 14651da177e4SLinus Torvalds &event_handler->device->event_handler_list); 14661da177e4SLinus Torvalds spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 14671da177e4SLinus Torvalds } 14681da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_event_handler); 14691da177e4SLinus Torvalds 14701da177e4SLinus Torvalds /** 14711da177e4SLinus Torvalds * ib_unregister_event_handler - Unregister an event handler 14721da177e4SLinus Torvalds * @event_handler:Handler to unregister 14731da177e4SLinus Torvalds * 14741da177e4SLinus Torvalds * Unregister an event handler registered with 14751da177e4SLinus Torvalds * ib_register_event_handler(). 14761da177e4SLinus Torvalds */ 1477dcc9881eSLeon Romanovsky void ib_unregister_event_handler(struct ib_event_handler *event_handler) 14781da177e4SLinus Torvalds { 14791da177e4SLinus Torvalds unsigned long flags; 14801da177e4SLinus Torvalds 14811da177e4SLinus Torvalds spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 14821da177e4SLinus Torvalds list_del(&event_handler->list); 14831da177e4SLinus Torvalds spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 14841da177e4SLinus Torvalds } 14851da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_event_handler); 14861da177e4SLinus Torvalds 14871da177e4SLinus Torvalds /** 14881da177e4SLinus Torvalds * ib_dispatch_event - Dispatch an asynchronous event 14891da177e4SLinus Torvalds * @event:Event to dispatch 14901da177e4SLinus Torvalds * 14911da177e4SLinus Torvalds * Low-level drivers must call ib_dispatch_event() to dispatch the 14921da177e4SLinus Torvalds * event to all registered event handlers when an asynchronous event 14931da177e4SLinus Torvalds * occurs. 14941da177e4SLinus Torvalds */ 14951da177e4SLinus Torvalds void ib_dispatch_event(struct ib_event *event) 14961da177e4SLinus Torvalds { 14971da177e4SLinus Torvalds unsigned long flags; 14981da177e4SLinus Torvalds struct ib_event_handler *handler; 14991da177e4SLinus Torvalds 15001da177e4SLinus Torvalds spin_lock_irqsave(&event->device->event_handler_lock, flags); 15011da177e4SLinus Torvalds 15021da177e4SLinus Torvalds list_for_each_entry(handler, &event->device->event_handler_list, list) 15031da177e4SLinus Torvalds handler->handler(handler, event); 15041da177e4SLinus Torvalds 15051da177e4SLinus Torvalds spin_unlock_irqrestore(&event->device->event_handler_lock, flags); 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dispatch_event); 15081da177e4SLinus Torvalds 15091da177e4SLinus Torvalds /** 15101da177e4SLinus Torvalds * ib_query_port - Query IB port attributes 15111da177e4SLinus Torvalds * @device:Device to query 15121da177e4SLinus Torvalds * @port_num:Port number to query 15131da177e4SLinus Torvalds * @port_attr:Port attributes 15141da177e4SLinus Torvalds * 15151da177e4SLinus Torvalds * ib_query_port() returns the attributes of a port through the 15161da177e4SLinus Torvalds * @port_attr pointer. 
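 *
 * Minimal example (illustrative only, error handling reduced to the check):
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr))
 *		pr_debug("port %d state %d\n", port_num, attr.state);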
15171da177e4SLinus Torvalds */ 15181da177e4SLinus Torvalds int ib_query_port(struct ib_device *device, 15191da177e4SLinus Torvalds u8 port_num, 15201da177e4SLinus Torvalds struct ib_port_attr *port_attr) 15211da177e4SLinus Torvalds { 1522fad61ad4SEli Cohen union ib_gid gid; 1523fad61ad4SEli Cohen int err; 1524fad61ad4SEli Cohen 152524dc831bSYuval Shaia if (!rdma_is_port_valid(device, port_num)) 1526116c0074SRoland Dreier return -EINVAL; 1527116c0074SRoland Dreier 1528fad61ad4SEli Cohen memset(port_attr, 0, sizeof(*port_attr)); 15293023a1e9SKamal Heib err = device->ops.query_port(device, port_num, port_attr); 1530fad61ad4SEli Cohen if (err || port_attr->subnet_prefix) 1531fad61ad4SEli Cohen return err; 1532fad61ad4SEli Cohen 1533d7012467SEli Cohen if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) 1534d7012467SEli Cohen return 0; 1535d7012467SEli Cohen 15363023a1e9SKamal Heib err = device->ops.query_gid(device, port_num, 0, &gid); 1537fad61ad4SEli Cohen if (err) 1538fad61ad4SEli Cohen return err; 1539fad61ad4SEli Cohen 1540fad61ad4SEli Cohen port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); 1541fad61ad4SEli Cohen return 0; 15421da177e4SLinus Torvalds } 15431da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_port); 15441da177e4SLinus Torvalds 1545324e227eSJason Gunthorpe static void add_ndev_hash(struct ib_port_data *pdata) 1546324e227eSJason Gunthorpe { 1547324e227eSJason Gunthorpe unsigned long flags; 1548324e227eSJason Gunthorpe 1549324e227eSJason Gunthorpe might_sleep(); 1550324e227eSJason Gunthorpe 1551324e227eSJason Gunthorpe spin_lock_irqsave(&ndev_hash_lock, flags); 1552324e227eSJason Gunthorpe if (hash_hashed(&pdata->ndev_hash_link)) { 1553324e227eSJason Gunthorpe hash_del_rcu(&pdata->ndev_hash_link); 1554324e227eSJason Gunthorpe spin_unlock_irqrestore(&ndev_hash_lock, flags); 1555324e227eSJason Gunthorpe /* 1556324e227eSJason Gunthorpe * We cannot do hash_add_rcu after a hash_del_rcu until the 1557324e227eSJason Gunthorpe * grace period 1558324e227eSJason Gunthorpe */ 1559324e227eSJason Gunthorpe synchronize_rcu(); 1560324e227eSJason Gunthorpe spin_lock_irqsave(&ndev_hash_lock, flags); 1561324e227eSJason Gunthorpe } 1562324e227eSJason Gunthorpe if (pdata->netdev) 1563324e227eSJason Gunthorpe hash_add_rcu(ndev_hash, &pdata->ndev_hash_link, 1564324e227eSJason Gunthorpe (uintptr_t)pdata->netdev); 1565324e227eSJason Gunthorpe spin_unlock_irqrestore(&ndev_hash_lock, flags); 1566324e227eSJason Gunthorpe } 1567324e227eSJason Gunthorpe 15681da177e4SLinus Torvalds /** 1569c2261dd7SJason Gunthorpe * ib_device_set_netdev - Associate the ib_dev with an underlying net_device 1570c2261dd7SJason Gunthorpe * @ib_dev: Device to modify 1571c2261dd7SJason Gunthorpe * @ndev: net_device to affiliate, may be NULL 1572c2261dd7SJason Gunthorpe * @port: IB port the net_device is connected to 1573c2261dd7SJason Gunthorpe * 1574c2261dd7SJason Gunthorpe * Drivers should use this to link the ib_device to a netdev so the netdev 1575c2261dd7SJason Gunthorpe * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be 1576c2261dd7SJason Gunthorpe * affiliated with any port. 1577c2261dd7SJason Gunthorpe * 1578c2261dd7SJason Gunthorpe * The caller must ensure that the given ndev is not unregistered or 1579c2261dd7SJason Gunthorpe * unregistering, and that either the ib_device is unregistered or 1580c2261dd7SJason Gunthorpe * ib_device_set_netdev() is called with NULL when the ndev sends a 1581c2261dd7SJason Gunthorpe * NETDEV_UNREGISTER event. 
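 *
 * A hedged sketch of the expected calls (the surrounding notifier plumbing is
 * assumed, not shown here). When the driver binds a netdev to a port:
 *
 *	ib_device_set_netdev(ibdev, ndev, port);
 *
 * and from its NETDEV_UNREGISTER handling:
 *
 *	ib_device_set_netdev(ibdev, NULL, port);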
1582c2261dd7SJason Gunthorpe */ 1583c2261dd7SJason Gunthorpe int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 1584c2261dd7SJason Gunthorpe unsigned int port) 1585c2261dd7SJason Gunthorpe { 1586c2261dd7SJason Gunthorpe struct net_device *old_ndev; 1587c2261dd7SJason Gunthorpe struct ib_port_data *pdata; 1588c2261dd7SJason Gunthorpe unsigned long flags; 1589c2261dd7SJason Gunthorpe int ret; 1590c2261dd7SJason Gunthorpe 1591c2261dd7SJason Gunthorpe /* 1592c2261dd7SJason Gunthorpe * Drivers wish to call this before ib_register_driver, so we have to 1593c2261dd7SJason Gunthorpe * setup the port data early. 1594c2261dd7SJason Gunthorpe */ 1595c2261dd7SJason Gunthorpe ret = alloc_port_data(ib_dev); 1596c2261dd7SJason Gunthorpe if (ret) 1597c2261dd7SJason Gunthorpe return ret; 1598c2261dd7SJason Gunthorpe 1599c2261dd7SJason Gunthorpe if (!rdma_is_port_valid(ib_dev, port)) 1600c2261dd7SJason Gunthorpe return -EINVAL; 1601c2261dd7SJason Gunthorpe 1602c2261dd7SJason Gunthorpe pdata = &ib_dev->port_data[port]; 1603c2261dd7SJason Gunthorpe spin_lock_irqsave(&pdata->netdev_lock, flags); 1604324e227eSJason Gunthorpe old_ndev = rcu_dereference_protected( 1605324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 1606324e227eSJason Gunthorpe if (old_ndev == ndev) { 1607c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 1608c2261dd7SJason Gunthorpe return 0; 1609c2261dd7SJason Gunthorpe } 1610c2261dd7SJason Gunthorpe 1611c2261dd7SJason Gunthorpe if (ndev) 1612c2261dd7SJason Gunthorpe dev_hold(ndev); 1613324e227eSJason Gunthorpe rcu_assign_pointer(pdata->netdev, ndev); 1614c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 1615c2261dd7SJason Gunthorpe 1616324e227eSJason Gunthorpe add_ndev_hash(pdata); 1617c2261dd7SJason Gunthorpe if (old_ndev) 1618c2261dd7SJason Gunthorpe dev_put(old_ndev); 1619c2261dd7SJason Gunthorpe 1620c2261dd7SJason Gunthorpe return 0; 1621c2261dd7SJason Gunthorpe } 1622c2261dd7SJason Gunthorpe EXPORT_SYMBOL(ib_device_set_netdev); 1623c2261dd7SJason Gunthorpe 1624c2261dd7SJason Gunthorpe static void free_netdevs(struct ib_device *ib_dev) 1625c2261dd7SJason Gunthorpe { 1626c2261dd7SJason Gunthorpe unsigned long flags; 1627c2261dd7SJason Gunthorpe unsigned int port; 1628c2261dd7SJason Gunthorpe 1629c2261dd7SJason Gunthorpe rdma_for_each_port (ib_dev, port) { 1630c2261dd7SJason Gunthorpe struct ib_port_data *pdata = &ib_dev->port_data[port]; 1631324e227eSJason Gunthorpe struct net_device *ndev; 1632c2261dd7SJason Gunthorpe 1633c2261dd7SJason Gunthorpe spin_lock_irqsave(&pdata->netdev_lock, flags); 1634324e227eSJason Gunthorpe ndev = rcu_dereference_protected( 1635324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 1636324e227eSJason Gunthorpe if (ndev) { 1637324e227eSJason Gunthorpe spin_lock(&ndev_hash_lock); 1638324e227eSJason Gunthorpe hash_del_rcu(&pdata->ndev_hash_link); 1639324e227eSJason Gunthorpe spin_unlock(&ndev_hash_lock); 1640324e227eSJason Gunthorpe 1641324e227eSJason Gunthorpe /* 1642324e227eSJason Gunthorpe * If this is the last dev_put there is still a 1643324e227eSJason Gunthorpe * synchronize_rcu before the netdev is kfreed, so we 1644324e227eSJason Gunthorpe * can continue to rely on unlocked pointer 1645324e227eSJason Gunthorpe * comparisons after the put 1646324e227eSJason Gunthorpe */ 1647324e227eSJason Gunthorpe rcu_assign_pointer(pdata->netdev, NULL); 1648324e227eSJason Gunthorpe dev_put(ndev); 1649c2261dd7SJason Gunthorpe } 
1650c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 1651c2261dd7SJason Gunthorpe } 1652c2261dd7SJason Gunthorpe } 1653c2261dd7SJason Gunthorpe 1654c2261dd7SJason Gunthorpe struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 1655c2261dd7SJason Gunthorpe unsigned int port) 1656c2261dd7SJason Gunthorpe { 1657c2261dd7SJason Gunthorpe struct ib_port_data *pdata; 1658c2261dd7SJason Gunthorpe struct net_device *res; 1659c2261dd7SJason Gunthorpe 1660c2261dd7SJason Gunthorpe if (!rdma_is_port_valid(ib_dev, port)) 1661c2261dd7SJason Gunthorpe return NULL; 1662c2261dd7SJason Gunthorpe 1663c2261dd7SJason Gunthorpe pdata = &ib_dev->port_data[port]; 1664c2261dd7SJason Gunthorpe 1665c2261dd7SJason Gunthorpe /* 1666c2261dd7SJason Gunthorpe * New drivers should use ib_device_set_netdev() not the legacy 1667c2261dd7SJason Gunthorpe * get_netdev(). 1668c2261dd7SJason Gunthorpe */ 1669c2261dd7SJason Gunthorpe if (ib_dev->ops.get_netdev) 1670c2261dd7SJason Gunthorpe res = ib_dev->ops.get_netdev(ib_dev, port); 1671c2261dd7SJason Gunthorpe else { 1672c2261dd7SJason Gunthorpe spin_lock(&pdata->netdev_lock); 1673324e227eSJason Gunthorpe res = rcu_dereference_protected( 1674324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 1675c2261dd7SJason Gunthorpe if (res) 1676c2261dd7SJason Gunthorpe dev_hold(res); 1677c2261dd7SJason Gunthorpe spin_unlock(&pdata->netdev_lock); 1678c2261dd7SJason Gunthorpe } 1679c2261dd7SJason Gunthorpe 1680c2261dd7SJason Gunthorpe /* 1681c2261dd7SJason Gunthorpe * If we are starting to unregister expedite things by preventing 1682c2261dd7SJason Gunthorpe * propagation of an unregistering netdev. 1683c2261dd7SJason Gunthorpe */ 1684c2261dd7SJason Gunthorpe if (res && res->reg_state != NETREG_REGISTERED) { 1685c2261dd7SJason Gunthorpe dev_put(res); 1686c2261dd7SJason Gunthorpe return NULL; 1687c2261dd7SJason Gunthorpe } 1688c2261dd7SJason Gunthorpe 1689c2261dd7SJason Gunthorpe return res; 1690c2261dd7SJason Gunthorpe } 1691c2261dd7SJason Gunthorpe 1692c2261dd7SJason Gunthorpe /** 1693324e227eSJason Gunthorpe * ib_device_get_by_netdev - Find an IB device associated with a netdev 1694324e227eSJason Gunthorpe * @ndev: netdev to locate 1695324e227eSJason Gunthorpe * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) 1696324e227eSJason Gunthorpe * 1697324e227eSJason Gunthorpe * Find and hold an ib_device that is associated with a netdev via 1698324e227eSJason Gunthorpe * ib_device_set_netdev(). The caller must call ib_device_put() on the 1699324e227eSJason Gunthorpe * returned pointer. 
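 *
 * Sketch of the get/put pairing required by this interface:
 *
 *	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *	if (ibdev) {
 *		... use ibdev ...
 *		ib_device_put(ibdev);
 *	}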
1700324e227eSJason Gunthorpe */ 1701324e227eSJason Gunthorpe struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 1702324e227eSJason Gunthorpe enum rdma_driver_id driver_id) 1703324e227eSJason Gunthorpe { 1704324e227eSJason Gunthorpe struct ib_device *res = NULL; 1705324e227eSJason Gunthorpe struct ib_port_data *cur; 1706324e227eSJason Gunthorpe 1707324e227eSJason Gunthorpe rcu_read_lock(); 1708324e227eSJason Gunthorpe hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, 1709324e227eSJason Gunthorpe (uintptr_t)ndev) { 1710324e227eSJason Gunthorpe if (rcu_access_pointer(cur->netdev) == ndev && 1711324e227eSJason Gunthorpe (driver_id == RDMA_DRIVER_UNKNOWN || 1712324e227eSJason Gunthorpe cur->ib_dev->driver_id == driver_id) && 1713324e227eSJason Gunthorpe ib_device_try_get(cur->ib_dev)) { 1714324e227eSJason Gunthorpe res = cur->ib_dev; 1715324e227eSJason Gunthorpe break; 1716324e227eSJason Gunthorpe } 1717324e227eSJason Gunthorpe } 1718324e227eSJason Gunthorpe rcu_read_unlock(); 1719324e227eSJason Gunthorpe 1720324e227eSJason Gunthorpe return res; 1721324e227eSJason Gunthorpe } 1722324e227eSJason Gunthorpe EXPORT_SYMBOL(ib_device_get_by_netdev); 1723324e227eSJason Gunthorpe 1724324e227eSJason Gunthorpe /** 172503db3a2dSMatan Barak * ib_enum_roce_netdev - enumerate all RoCE ports 172603db3a2dSMatan Barak * @ib_dev : IB device we want to query 172703db3a2dSMatan Barak * @filter: Should we call the callback? 172803db3a2dSMatan Barak * @filter_cookie: Cookie passed to filter 172903db3a2dSMatan Barak * @cb: Callback to call for each found RoCE ports 173003db3a2dSMatan Barak * @cookie: Cookie passed back to the callback 173103db3a2dSMatan Barak * 173203db3a2dSMatan Barak * Enumerates all of the physical RoCE ports of ib_dev 173303db3a2dSMatan Barak * which are related to netdevice and calls callback() on each 173403db3a2dSMatan Barak * device for which filter() function returns non zero. 173503db3a2dSMatan Barak */ 173603db3a2dSMatan Barak void ib_enum_roce_netdev(struct ib_device *ib_dev, 173703db3a2dSMatan Barak roce_netdev_filter filter, 173803db3a2dSMatan Barak void *filter_cookie, 173903db3a2dSMatan Barak roce_netdev_callback cb, 174003db3a2dSMatan Barak void *cookie) 174103db3a2dSMatan Barak { 1742ea1075edSJason Gunthorpe unsigned int port; 174303db3a2dSMatan Barak 1744ea1075edSJason Gunthorpe rdma_for_each_port (ib_dev, port) 174503db3a2dSMatan Barak if (rdma_protocol_roce(ib_dev, port)) { 1746c2261dd7SJason Gunthorpe struct net_device *idev = 1747c2261dd7SJason Gunthorpe ib_device_get_netdev(ib_dev, port); 174803db3a2dSMatan Barak 174903db3a2dSMatan Barak if (filter(ib_dev, port, idev, filter_cookie)) 175003db3a2dSMatan Barak cb(ib_dev, port, idev, cookie); 175103db3a2dSMatan Barak 175203db3a2dSMatan Barak if (idev) 175303db3a2dSMatan Barak dev_put(idev); 175403db3a2dSMatan Barak } 175503db3a2dSMatan Barak } 175603db3a2dSMatan Barak 175703db3a2dSMatan Barak /** 175803db3a2dSMatan Barak * ib_enum_all_roce_netdevs - enumerate all RoCE devices 175903db3a2dSMatan Barak * @filter: Should we call the callback? 
176003db3a2dSMatan Barak * @filter_cookie: Cookie passed to filter 176103db3a2dSMatan Barak * @cb: Callback to call for each found RoCE ports 176203db3a2dSMatan Barak * @cookie: Cookie passed back to the callback 176303db3a2dSMatan Barak * 176403db3a2dSMatan Barak * Enumerates all RoCE devices' physical ports which are related 176503db3a2dSMatan Barak * to netdevices and calls callback() on each device for which 176603db3a2dSMatan Barak * filter() function returns non zero. 176703db3a2dSMatan Barak */ 176803db3a2dSMatan Barak void ib_enum_all_roce_netdevs(roce_netdev_filter filter, 176903db3a2dSMatan Barak void *filter_cookie, 177003db3a2dSMatan Barak roce_netdev_callback cb, 177103db3a2dSMatan Barak void *cookie) 177203db3a2dSMatan Barak { 177303db3a2dSMatan Barak struct ib_device *dev; 17740df91bb6SJason Gunthorpe unsigned long index; 177503db3a2dSMatan Barak 1776921eab11SJason Gunthorpe down_read(&devices_rwsem); 17770df91bb6SJason Gunthorpe xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) 177803db3a2dSMatan Barak ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); 1779921eab11SJason Gunthorpe up_read(&devices_rwsem); 178003db3a2dSMatan Barak } 178103db3a2dSMatan Barak 178203db3a2dSMatan Barak /** 17838030c835SLeon Romanovsky * ib_enum_all_devs - enumerate all ib_devices 17848030c835SLeon Romanovsky * @cb: Callback to call for each found ib_device 17858030c835SLeon Romanovsky * 17868030c835SLeon Romanovsky * Enumerates all ib_devices and calls callback() on each device. 17878030c835SLeon Romanovsky */ 17888030c835SLeon Romanovsky int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, 17898030c835SLeon Romanovsky struct netlink_callback *cb) 17908030c835SLeon Romanovsky { 17910df91bb6SJason Gunthorpe unsigned long index; 17928030c835SLeon Romanovsky struct ib_device *dev; 17938030c835SLeon Romanovsky unsigned int idx = 0; 17948030c835SLeon Romanovsky int ret = 0; 17958030c835SLeon Romanovsky 1796921eab11SJason Gunthorpe down_read(&devices_rwsem); 17970df91bb6SJason Gunthorpe xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 17988030c835SLeon Romanovsky ret = nldev_cb(dev, skb, cb, idx); 17998030c835SLeon Romanovsky if (ret) 18008030c835SLeon Romanovsky break; 18018030c835SLeon Romanovsky idx++; 18028030c835SLeon Romanovsky } 1803921eab11SJason Gunthorpe up_read(&devices_rwsem); 18048030c835SLeon Romanovsky return ret; 18058030c835SLeon Romanovsky } 18068030c835SLeon Romanovsky 18078030c835SLeon Romanovsky /** 18081da177e4SLinus Torvalds * ib_query_pkey - Get P_Key table entry 18091da177e4SLinus Torvalds * @device:Device to query 18101da177e4SLinus Torvalds * @port_num:Port number to query 18111da177e4SLinus Torvalds * @index:P_Key table index to query 18121da177e4SLinus Torvalds * @pkey:Returned P_Key 18131da177e4SLinus Torvalds * 18141da177e4SLinus Torvalds * ib_query_pkey() fetches the specified P_Key table entry. 
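 *
 * Minimal example (illustrative only):
 *
 *	u16 pkey;
 *
 *	if (!ib_query_pkey(device, port_num, 0, &pkey))
 *		pr_debug("pkey[0] = 0x%04x\n", pkey);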
18151da177e4SLinus Torvalds  */
18161da177e4SLinus Torvalds int ib_query_pkey(struct ib_device *device,
18171da177e4SLinus Torvalds 		  u8 port_num, u16 index, u16 *pkey)
18181da177e4SLinus Torvalds {
18199af3f5cfSYuval Shaia 	if (!rdma_is_port_valid(device, port_num))
18209af3f5cfSYuval Shaia 		return -EINVAL;
18219af3f5cfSYuval Shaia 
18223023a1e9SKamal Heib 	return device->ops.query_pkey(device, port_num, index, pkey);
18231da177e4SLinus Torvalds }
18241da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_pkey);
18251da177e4SLinus Torvalds 
18261da177e4SLinus Torvalds /**
18271da177e4SLinus Torvalds  * ib_modify_device - Change IB device attributes
18281da177e4SLinus Torvalds  * @device:Device to modify
18291da177e4SLinus Torvalds  * @device_modify_mask:Mask of attributes to change
18301da177e4SLinus Torvalds  * @device_modify:New attribute values
18311da177e4SLinus Torvalds  *
18321da177e4SLinus Torvalds  * ib_modify_device() changes a device's attributes as specified by
18331da177e4SLinus Torvalds  * the @device_modify_mask and @device_modify structure.
18341da177e4SLinus Torvalds  */
18351da177e4SLinus Torvalds int ib_modify_device(struct ib_device *device,
18361da177e4SLinus Torvalds 		     int device_modify_mask,
18371da177e4SLinus Torvalds 		     struct ib_device_modify *device_modify)
18381da177e4SLinus Torvalds {
18393023a1e9SKamal Heib 	if (!device->ops.modify_device)
184010e1b54bSBart Van Assche 		return -ENOSYS;
184110e1b54bSBart Van Assche 
18423023a1e9SKamal Heib 	return device->ops.modify_device(device, device_modify_mask,
18431da177e4SLinus Torvalds 					 device_modify);
18441da177e4SLinus Torvalds }
18451da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_device);
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds /**
18481da177e4SLinus Torvalds  * ib_modify_port - Modifies the attributes for the specified port.
18491da177e4SLinus Torvalds  * @device: The device to modify.
18501da177e4SLinus Torvalds  * @port_num: The number of the port to modify.
18511da177e4SLinus Torvalds  * @port_modify_mask: Mask used to specify which attributes of the port
18521da177e4SLinus Torvalds  *   to change.
18531da177e4SLinus Torvalds  * @port_modify: New attribute values for the port.
18541da177e4SLinus Torvalds  *
18551da177e4SLinus Torvalds  * ib_modify_port() changes a port's attributes as specified by the
18561da177e4SLinus Torvalds  * @port_modify_mask and @port_modify structure.
18571da177e4SLinus Torvalds  */
18581da177e4SLinus Torvalds int ib_modify_port(struct ib_device *device,
18591da177e4SLinus Torvalds 		   u8 port_num, int port_modify_mask,
18601da177e4SLinus Torvalds 		   struct ib_port_modify *port_modify)
18611da177e4SLinus Torvalds {
186261e0962dSSelvin Xavier 	int rc;
186310e1b54bSBart Van Assche 
186424dc831bSYuval Shaia 	if (!rdma_is_port_valid(device, port_num))
1865116c0074SRoland Dreier 		return -EINVAL;
1866116c0074SRoland Dreier 
18673023a1e9SKamal Heib 	if (device->ops.modify_port)
18683023a1e9SKamal Heib 		rc = device->ops.modify_port(device, port_num,
18693023a1e9SKamal Heib 					     port_modify_mask,
18701da177e4SLinus Torvalds 					     port_modify);
187161e0962dSSelvin Xavier 	else
187261e0962dSSelvin Xavier 		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
187361e0962dSSelvin Xavier 	return rc;
18741da177e4SLinus Torvalds }
18751da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_port);
18761da177e4SLinus Torvalds 
18775eb620c8SYosef Etigin /**
18785eb620c8SYosef Etigin  * ib_find_gid - Returns the port number and GID table index where
1879dbb12562SParav Pandit  *   a specified GID value occurs. It searches only for the IB link layer.
18805eb620c8SYosef Etigin * @device: The device to query. 18815eb620c8SYosef Etigin * @gid: The GID value to search for. 18825eb620c8SYosef Etigin * @port_num: The port number of the device where the GID value was found. 18835eb620c8SYosef Etigin * @index: The index into the GID table where the GID was found. This 18845eb620c8SYosef Etigin * parameter may be NULL. 18855eb620c8SYosef Etigin */ 18865eb620c8SYosef Etigin int ib_find_gid(struct ib_device *device, union ib_gid *gid, 1887b26c4a11SParav Pandit u8 *port_num, u16 *index) 18885eb620c8SYosef Etigin { 18895eb620c8SYosef Etigin union ib_gid tmp_gid; 1890ea1075edSJason Gunthorpe unsigned int port; 1891ea1075edSJason Gunthorpe int ret, i; 18925eb620c8SYosef Etigin 1893ea1075edSJason Gunthorpe rdma_for_each_port (device, port) { 189422d24f75SParav Pandit if (!rdma_protocol_ib(device, port)) 1895b39ffa1dSMatan Barak continue; 1896b39ffa1dSMatan Barak 18978ceb1357SJason Gunthorpe for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; 18988ceb1357SJason Gunthorpe ++i) { 18991dfce294SParav Pandit ret = rdma_query_gid(device, port, i, &tmp_gid); 19005eb620c8SYosef Etigin if (ret) 19015eb620c8SYosef Etigin return ret; 19025eb620c8SYosef Etigin if (!memcmp(&tmp_gid, gid, sizeof *gid)) { 19035eb620c8SYosef Etigin *port_num = port; 19045eb620c8SYosef Etigin if (index) 19055eb620c8SYosef Etigin *index = i; 19065eb620c8SYosef Etigin return 0; 19075eb620c8SYosef Etigin } 19085eb620c8SYosef Etigin } 19095eb620c8SYosef Etigin } 19105eb620c8SYosef Etigin 19115eb620c8SYosef Etigin return -ENOENT; 19125eb620c8SYosef Etigin } 19135eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_gid); 19145eb620c8SYosef Etigin 19155eb620c8SYosef Etigin /** 19165eb620c8SYosef Etigin * ib_find_pkey - Returns the PKey table index where a specified 19175eb620c8SYosef Etigin * PKey value occurs. 19185eb620c8SYosef Etigin * @device: The device to query. 19195eb620c8SYosef Etigin * @port_num: The port number of the device to search for the PKey. 19205eb620c8SYosef Etigin * @pkey: The PKey value to search for. 19215eb620c8SYosef Etigin * @index: The index into the PKey table where the PKey was found. 
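 *
 * Example: look up the slot for partition 0xffff. Per the code below, full
 * and limited membership of the same partition both match, with a preference
 * for the full-member entry (illustrative only):
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *		pr_debug("default pkey at index %d\n", index);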
19225eb620c8SYosef Etigin */ 19235eb620c8SYosef Etigin int ib_find_pkey(struct ib_device *device, 19245eb620c8SYosef Etigin u8 port_num, u16 pkey, u16 *index) 19255eb620c8SYosef Etigin { 19265eb620c8SYosef Etigin int ret, i; 19275eb620c8SYosef Etigin u16 tmp_pkey; 1928ff7166c4SJack Morgenstein int partial_ix = -1; 19295eb620c8SYosef Etigin 19308ceb1357SJason Gunthorpe for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; 19318ceb1357SJason Gunthorpe ++i) { 19325eb620c8SYosef Etigin ret = ib_query_pkey(device, port_num, i, &tmp_pkey); 19335eb620c8SYosef Etigin if (ret) 19345eb620c8SYosef Etigin return ret; 193536026eccSMoni Shoua if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { 1936ff7166c4SJack Morgenstein /* if there is full-member pkey take it.*/ 1937ff7166c4SJack Morgenstein if (tmp_pkey & 0x8000) { 19385eb620c8SYosef Etigin *index = i; 19395eb620c8SYosef Etigin return 0; 19405eb620c8SYosef Etigin } 1941ff7166c4SJack Morgenstein if (partial_ix < 0) 1942ff7166c4SJack Morgenstein partial_ix = i; 1943ff7166c4SJack Morgenstein } 19445eb620c8SYosef Etigin } 19455eb620c8SYosef Etigin 1946ff7166c4SJack Morgenstein /*no full-member, if exists take the limited*/ 1947ff7166c4SJack Morgenstein if (partial_ix >= 0) { 1948ff7166c4SJack Morgenstein *index = partial_ix; 1949ff7166c4SJack Morgenstein return 0; 1950ff7166c4SJack Morgenstein } 19515eb620c8SYosef Etigin return -ENOENT; 19525eb620c8SYosef Etigin } 19535eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_pkey); 19545eb620c8SYosef Etigin 19559268f72dSYotam Kenneth /** 19569268f72dSYotam Kenneth * ib_get_net_dev_by_params() - Return the appropriate net_dev 19579268f72dSYotam Kenneth * for a received CM request 19589268f72dSYotam Kenneth * @dev: An RDMA device on which the request has been received. 19599268f72dSYotam Kenneth * @port: Port number on the RDMA device. 19609268f72dSYotam Kenneth * @pkey: The Pkey the request came on. 19619268f72dSYotam Kenneth * @gid: A GID that the net_dev uses to communicate. 19629268f72dSYotam Kenneth * @addr: Contains the IP address that the request specified as its 19639268f72dSYotam Kenneth * destination. 
1964921eab11SJason Gunthorpe * 19659268f72dSYotam Kenneth */ 19669268f72dSYotam Kenneth struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, 19679268f72dSYotam Kenneth u8 port, 19689268f72dSYotam Kenneth u16 pkey, 19699268f72dSYotam Kenneth const union ib_gid *gid, 19709268f72dSYotam Kenneth const struct sockaddr *addr) 19719268f72dSYotam Kenneth { 19729268f72dSYotam Kenneth struct net_device *net_dev = NULL; 19730df91bb6SJason Gunthorpe unsigned long index; 19740df91bb6SJason Gunthorpe void *client_data; 19759268f72dSYotam Kenneth 19769268f72dSYotam Kenneth if (!rdma_protocol_ib(dev, port)) 19779268f72dSYotam Kenneth return NULL; 19789268f72dSYotam Kenneth 1979921eab11SJason Gunthorpe /* 1980921eab11SJason Gunthorpe * Holding the read side guarantees that the client will not become 1981921eab11SJason Gunthorpe * unregistered while we are calling get_net_dev_by_params() 1982921eab11SJason Gunthorpe */ 1983921eab11SJason Gunthorpe down_read(&dev->client_data_rwsem); 19840df91bb6SJason Gunthorpe xan_for_each_marked (&dev->client_data, index, client_data, 19850df91bb6SJason Gunthorpe CLIENT_DATA_REGISTERED) { 19860df91bb6SJason Gunthorpe struct ib_client *client = xa_load(&clients, index); 19879268f72dSYotam Kenneth 19880df91bb6SJason Gunthorpe if (!client || !client->get_net_dev_by_params) 19899268f72dSYotam Kenneth continue; 19909268f72dSYotam Kenneth 19910df91bb6SJason Gunthorpe net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, 19920df91bb6SJason Gunthorpe addr, client_data); 19939268f72dSYotam Kenneth if (net_dev) 19949268f72dSYotam Kenneth break; 19959268f72dSYotam Kenneth } 1996921eab11SJason Gunthorpe up_read(&dev->client_data_rwsem); 19979268f72dSYotam Kenneth 19989268f72dSYotam Kenneth return net_dev; 19999268f72dSYotam Kenneth } 20009268f72dSYotam Kenneth EXPORT_SYMBOL(ib_get_net_dev_by_params); 20019268f72dSYotam Kenneth 2002521ed0d9SKamal Heib void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) 2003521ed0d9SKamal Heib { 20043023a1e9SKamal Heib struct ib_device_ops *dev_ops = &dev->ops; 2005521ed0d9SKamal Heib #define SET_DEVICE_OP(ptr, name) \ 2006521ed0d9SKamal Heib do { \ 2007521ed0d9SKamal Heib if (ops->name) \ 2008521ed0d9SKamal Heib if (!((ptr)->name)) \ 2009521ed0d9SKamal Heib (ptr)->name = ops->name; \ 2010521ed0d9SKamal Heib } while (0) 2011521ed0d9SKamal Heib 201230471d4bSLeon Romanovsky #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) 201330471d4bSLeon Romanovsky 20143023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, add_gid); 20152f1927b0SMoni Shoua SET_DEVICE_OP(dev_ops, advise_mr); 20163023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_dm); 20173023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_fmr); 20183023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_hw_stats); 20193023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_mr); 20203023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_mw); 20213023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_pd); 20223023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); 20233023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_ucontext); 20243023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_xrcd); 20253023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, attach_mcast); 20263023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, check_mr_status); 20273023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_ah); 20283023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_counters); 20293023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_cq); 20303023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_flow); 20313023a1e9SKamal Heib 
SET_DEVICE_OP(dev_ops, create_flow_action_esp); 20323023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_qp); 20333023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_rwq_ind_table); 20343023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_srq); 20353023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_wq); 20363023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_dm); 2037d0899892SJason Gunthorpe SET_DEVICE_OP(dev_ops, dealloc_driver); 20383023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_fmr); 20393023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_mw); 20403023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_pd); 20413023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_ucontext); 20423023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_xrcd); 20433023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, del_gid); 20443023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dereg_mr); 20453023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_ah); 20463023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_counters); 20473023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_cq); 20483023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_flow); 20493023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_flow_action); 20503023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_qp); 20513023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); 20523023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_srq); 20533023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_wq); 20543023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, detach_mcast); 20553023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, disassociate_ucontext); 20563023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, drain_rq); 20573023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, drain_sq); 2058ca22354bSJason Gunthorpe SET_DEVICE_OP(dev_ops, enable_driver); 205902da3750SLeon Romanovsky SET_DEVICE_OP(dev_ops, fill_res_entry); 20603023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_dev_fw_str); 20613023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_dma_mr); 20623023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_hw_stats); 20633023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_link_layer); 20643023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_netdev); 20653023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_port_immutable); 20663023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_vector_affinity); 20673023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_vf_config); 20683023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_vf_stats); 2069ea4baf7fSParav Pandit SET_DEVICE_OP(dev_ops, init_port); 20703023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, map_mr_sg); 20713023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, map_phys_fmr); 20723023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, mmap); 20733023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_ah); 20743023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_cq); 20753023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_device); 20763023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_flow_action_esp); 20773023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_port); 20783023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_qp); 20793023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_srq); 20803023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_wq); 20813023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, peek_cq); 20823023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, poll_cq); 20833023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_recv); 20843023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_send); 20853023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_srq_recv); 20863023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, process_mad); 20873023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_ah); 20883023a1e9SKamal Heib 
SET_DEVICE_OP(dev_ops, query_device); 20893023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_gid); 20903023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_pkey); 20913023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_port); 20923023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_qp); 20933023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_srq); 20943023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); 20953023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, read_counters); 20963023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, reg_dm_mr); 20973023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, reg_user_mr); 20983023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, req_ncomp_notif); 20993023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, req_notify_cq); 21003023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, rereg_user_mr); 21013023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, resize_cq); 21023023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, set_vf_guid); 21033023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, set_vf_link_state); 21043023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, unmap_fmr); 210521a428a0SLeon Romanovsky 210621a428a0SLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_pd); 2107a2a074efSLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_ucontext); 2108521ed0d9SKamal Heib } 2109521ed0d9SKamal Heib EXPORT_SYMBOL(ib_set_device_ops); 2110521ed0d9SKamal Heib 2111d0e312feSLeon Romanovsky static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { 2112735c631aSMark Bloch [RDMA_NL_LS_OP_RESOLVE] = { 2113647c75acSLeon Romanovsky .doit = ib_nl_handle_resolve_resp, 2114e3a2b93dSLeon Romanovsky .flags = RDMA_NL_ADMIN_PERM, 2115e3a2b93dSLeon Romanovsky }, 2116735c631aSMark Bloch [RDMA_NL_LS_OP_SET_TIMEOUT] = { 2117647c75acSLeon Romanovsky .doit = ib_nl_handle_set_timeout, 2118e3a2b93dSLeon Romanovsky .flags = RDMA_NL_ADMIN_PERM, 2119e3a2b93dSLeon Romanovsky }, 2120ae43f828SMark Bloch [RDMA_NL_LS_OP_IP_RESOLVE] = { 2121647c75acSLeon Romanovsky .doit = ib_nl_handle_ip_res_resp, 2122e3a2b93dSLeon Romanovsky .flags = RDMA_NL_ADMIN_PERM, 2123e3a2b93dSLeon Romanovsky }, 2124735c631aSMark Bloch }; 2125735c631aSMark Bloch 21261da177e4SLinus Torvalds static int __init ib_core_init(void) 21271da177e4SLinus Torvalds { 21281da177e4SLinus Torvalds int ret; 21291da177e4SLinus Torvalds 2130f0626710STejun Heo ib_wq = alloc_workqueue("infiniband", 0, 0); 2131f0626710STejun Heo if (!ib_wq) 2132f0626710STejun Heo return -ENOMEM; 2133f0626710STejun Heo 213414d3a3b2SChristoph Hellwig ib_comp_wq = alloc_workqueue("ib-comp-wq", 2135b7363e67SSagi Grimberg WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 213614d3a3b2SChristoph Hellwig if (!ib_comp_wq) { 213714d3a3b2SChristoph Hellwig ret = -ENOMEM; 213814d3a3b2SChristoph Hellwig goto err; 213914d3a3b2SChristoph Hellwig } 214014d3a3b2SChristoph Hellwig 2141f794809aSJack Morgenstein ib_comp_unbound_wq = 2142f794809aSJack Morgenstein alloc_workqueue("ib-comp-unb-wq", 2143f794809aSJack Morgenstein WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | 2144f794809aSJack Morgenstein WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); 2145f794809aSJack Morgenstein if (!ib_comp_unbound_wq) { 2146f794809aSJack Morgenstein ret = -ENOMEM; 2147f794809aSJack Morgenstein goto err_comp; 2148f794809aSJack Morgenstein } 2149f794809aSJack Morgenstein 215055aeed06SJason Gunthorpe ret = class_register(&ib_class); 2151fd75c789SNir Muchtar if (ret) { 2152aba25a3eSParav Pandit pr_warn("Couldn't create InfiniBand device class\n"); 2153f794809aSJack Morgenstein goto err_comp_unbound; 2154fd75c789SNir Muchtar } 21551da177e4SLinus Torvalds 2156c9901724SLeon Romanovsky ret = rdma_nl_init(); 21571da177e4SLinus 
2125735c631aSMark Bloch
21261da177e4SLinus Torvalds static int __init ib_core_init(void)
21271da177e4SLinus Torvalds {
21281da177e4SLinus Torvalds int ret;
21291da177e4SLinus Torvalds
2130f0626710STejun Heo ib_wq = alloc_workqueue("infiniband", 0, 0);
2131f0626710STejun Heo if (!ib_wq)
2132f0626710STejun Heo return -ENOMEM;
2133f0626710STejun Heo
213414d3a3b2SChristoph Hellwig ib_comp_wq = alloc_workqueue("ib-comp-wq",
2135b7363e67SSagi Grimberg WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
213614d3a3b2SChristoph Hellwig if (!ib_comp_wq) {
213714d3a3b2SChristoph Hellwig ret = -ENOMEM;
213814d3a3b2SChristoph Hellwig goto err;
213914d3a3b2SChristoph Hellwig }
214014d3a3b2SChristoph Hellwig
2141f794809aSJack Morgenstein ib_comp_unbound_wq =
2142f794809aSJack Morgenstein alloc_workqueue("ib-comp-unb-wq",
2143f794809aSJack Morgenstein WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2144f794809aSJack Morgenstein WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2145f794809aSJack Morgenstein if (!ib_comp_unbound_wq) {
2146f794809aSJack Morgenstein ret = -ENOMEM;
2147f794809aSJack Morgenstein goto err_comp;
2148f794809aSJack Morgenstein }
2149f794809aSJack Morgenstein
215055aeed06SJason Gunthorpe ret = class_register(&ib_class);
2151fd75c789SNir Muchtar if (ret) {
2152aba25a3eSParav Pandit pr_warn("Couldn't create InfiniBand device class\n");
2153f794809aSJack Morgenstein goto err_comp_unbound;
2154fd75c789SNir Muchtar }
21551da177e4SLinus Torvalds
2156c9901724SLeon Romanovsky ret = rdma_nl_init();
21571da177e4SLinus Torvalds if (ret) {
2158c9901724SLeon Romanovsky pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
2159fd75c789SNir Muchtar goto err_sysfs;
21601da177e4SLinus Torvalds }
21611da177e4SLinus Torvalds
2162e3f20f02SLeon Romanovsky ret = addr_init();
2163e3f20f02SLeon Romanovsky if (ret) {
2164e3f20f02SLeon Romanovsky pr_warn("Couldn't init IB address resolution\n");
2165e3f20f02SLeon Romanovsky goto err_ibnl;
2166e3f20f02SLeon Romanovsky }
2167e3f20f02SLeon Romanovsky
21684c2cb422SMark Bloch ret = ib_mad_init();
21694c2cb422SMark Bloch if (ret) {
21704c2cb422SMark Bloch pr_warn("Couldn't init IB MAD\n");
21714c2cb422SMark Bloch goto err_addr;
21724c2cb422SMark Bloch }
21734c2cb422SMark Bloch
2174c2e49c92SMark Bloch ret = ib_sa_init();
2175c2e49c92SMark Bloch if (ret) {
2176c2e49c92SMark Bloch pr_warn("Couldn't init SA\n");
2177c2e49c92SMark Bloch goto err_mad;
2178c2e49c92SMark Bloch }
2179c2e49c92SMark Bloch
21808f408ab6SDaniel Jurgens ret = register_lsm_notifier(&ibdev_lsm_nb);
21818f408ab6SDaniel Jurgens if (ret) {
21828f408ab6SDaniel Jurgens pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
2183c9901724SLeon Romanovsky goto err_sa;
21848f408ab6SDaniel Jurgens }
21858f408ab6SDaniel Jurgens
21864e0f7b90SParav Pandit ret = register_pernet_device(&rdma_dev_net_ops);
21874e0f7b90SParav Pandit if (ret) {
21884e0f7b90SParav Pandit pr_warn("Couldn't init compat dev. ret %d\n", ret);
21894e0f7b90SParav Pandit goto err_compat;
21904e0f7b90SParav Pandit }
21914e0f7b90SParav Pandit
21926c80b41aSLeon Romanovsky nldev_init();
2193c9901724SLeon Romanovsky rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
21945ef8c0c1SJason Gunthorpe roce_gid_mgmt_init();
2195b2cbae2cSRoland Dreier
2196fd75c789SNir Muchtar return 0;
2197fd75c789SNir Muchtar
21984e0f7b90SParav Pandit err_compat:
21994e0f7b90SParav Pandit unregister_lsm_notifier(&ibdev_lsm_nb);
2200735c631aSMark Bloch err_sa:
2201735c631aSMark Bloch ib_sa_cleanup();
2202c2e49c92SMark Bloch err_mad:
2203c2e49c92SMark Bloch ib_mad_cleanup();
22044c2cb422SMark Bloch err_addr:
22054c2cb422SMark Bloch addr_cleanup();
2206e3f20f02SLeon Romanovsky err_ibnl:
2207c9901724SLeon Romanovsky rdma_nl_exit();
2208fd75c789SNir Muchtar err_sysfs:
220955aeed06SJason Gunthorpe class_unregister(&ib_class);
2210f794809aSJack Morgenstein err_comp_unbound:
2211f794809aSJack Morgenstein destroy_workqueue(ib_comp_unbound_wq);
221214d3a3b2SChristoph Hellwig err_comp:
221314d3a3b2SChristoph Hellwig destroy_workqueue(ib_comp_wq);
2214fd75c789SNir Muchtar err:
2215fd75c789SNir Muchtar destroy_workqueue(ib_wq);
22161da177e4SLinus Torvalds return ret;
22171da177e4SLinus Torvalds }
22181da177e4SLinus Torvalds
22191da177e4SLinus Torvalds static void __exit ib_core_cleanup(void)
22201da177e4SLinus Torvalds {
22215ef8c0c1SJason Gunthorpe roce_gid_mgmt_cleanup();
22226c80b41aSLeon Romanovsky nldev_exit();
2223c9901724SLeon Romanovsky rdma_nl_unregister(RDMA_NL_LS);
22244e0f7b90SParav Pandit unregister_pernet_device(&rdma_dev_net_ops);
2225c9901724SLeon Romanovsky unregister_lsm_notifier(&ibdev_lsm_nb);
2226c2e49c92SMark Bloch ib_sa_cleanup();
22274c2cb422SMark Bloch ib_mad_cleanup();
2228e3f20f02SLeon Romanovsky addr_cleanup();
2229c9901724SLeon Romanovsky rdma_nl_exit();
223055aeed06SJason Gunthorpe class_unregister(&ib_class);
2231f794809aSJack Morgenstein destroy_workqueue(ib_comp_unbound_wq);
223214d3a3b2SChristoph Hellwig destroy_workqueue(ib_comp_wq);
2233f7c6a7b5SRoland Dreier /* Make sure that any pending umem accounting work is done. */
2234f0626710STejun Heo destroy_workqueue(ib_wq);
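/*
 * Presumably the flush below waits for the deferred unregistration work that
 * ib_unregister_device_queued() schedules on system_unbound_wq, so that the
 * WARN_ON() checks that follow observe the final state of the clients and
 * devices xarrays.
 */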
2235d0899892SJason Gunthorpe flush_workqueue(system_unbound_wq);
2236e59178d8SJason Gunthorpe WARN_ON(!xa_empty(&clients));
22370df91bb6SJason Gunthorpe WARN_ON(!xa_empty(&devices));
22381da177e4SLinus Torvalds }
22391da177e4SLinus Torvalds
2240e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
2241e3bf14bdSJason Gunthorpe
224262dfa795SParav Pandit /* ib core relies on the netdev stack to first register the
224362dfa795SParav Pandit * net_ns_type_operations ns kobject type before ib_core initialization.
224462dfa795SParav Pandit */
224562dfa795SParav Pandit fs_initcall(ib_core_init);
22461da177e4SLinus Torvalds module_exit(ib_core_cleanup);
2247