11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Copyright (c) 2004 Topspin Communications. All rights reserved. 32a1d9b7fSRoland Dreier * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * This software is available to you under a choice of one of two 61da177e4SLinus Torvalds * licenses. You may choose to be licensed under the terms of the GNU 71da177e4SLinus Torvalds * General Public License (GPL) Version 2, available from the file 81da177e4SLinus Torvalds * COPYING in the main directory of this source tree, or the 91da177e4SLinus Torvalds * OpenIB.org BSD license below: 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * Redistribution and use in source and binary forms, with or 121da177e4SLinus Torvalds * without modification, are permitted provided that the following 131da177e4SLinus Torvalds * conditions are met: 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * - Redistributions of source code must retain the above 161da177e4SLinus Torvalds * copyright notice, this list of conditions and the following 171da177e4SLinus Torvalds * disclaimer. 181da177e4SLinus Torvalds * 191da177e4SLinus Torvalds * - Redistributions in binary form must reproduce the above 201da177e4SLinus Torvalds * copyright notice, this list of conditions and the following 211da177e4SLinus Torvalds * disclaimer in the documentation and/or other materials 221da177e4SLinus Torvalds * provided with the distribution. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 251da177e4SLinus Torvalds * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 261da177e4SLinus Torvalds * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 271da177e4SLinus Torvalds * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 281da177e4SLinus Torvalds * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 291da177e4SLinus Torvalds * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 301da177e4SLinus Torvalds * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 311da177e4SLinus Torvalds * SOFTWARE. 321da177e4SLinus Torvalds */ 331da177e4SLinus Torvalds 341da177e4SLinus Torvalds #include <linux/module.h> 351da177e4SLinus Torvalds #include <linux/string.h> 361da177e4SLinus Torvalds #include <linux/errno.h> 379a6b090cSAhmed S. 
Darwish #include <linux/kernel.h> 381da177e4SLinus Torvalds #include <linux/slab.h> 391da177e4SLinus Torvalds #include <linux/init.h> 409268f72dSYotam Kenneth #include <linux/netdevice.h> 414e0f7b90SParav Pandit #include <net/net_namespace.h> 424e0f7b90SParav Pandit #include <net/netns/generic.h> 438f408ab6SDaniel Jurgens #include <linux/security.h> 448f408ab6SDaniel Jurgens #include <linux/notifier.h> 45324e227eSJason Gunthorpe #include <linux/hashtable.h> 46b2cbae2cSRoland Dreier #include <rdma/rdma_netlink.h> 4703db3a2dSMatan Barak #include <rdma/ib_addr.h> 4803db3a2dSMatan Barak #include <rdma/ib_cache.h> 49413d3347SMark Zhang #include <rdma/rdma_counter.h> 501da177e4SLinus Torvalds 511da177e4SLinus Torvalds #include "core_priv.h" 5241eda65cSLeon Romanovsky #include "restrack.h" 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds MODULE_AUTHOR("Roland Dreier"); 551da177e4SLinus Torvalds MODULE_DESCRIPTION("core kernel InfiniBand API"); 561da177e4SLinus Torvalds MODULE_LICENSE("Dual BSD/GPL"); 571da177e4SLinus Torvalds 5814d3a3b2SChristoph Hellwig struct workqueue_struct *ib_comp_wq; 59f794809aSJack Morgenstein struct workqueue_struct *ib_comp_unbound_wq; 60f0626710STejun Heo struct workqueue_struct *ib_wq; 61f0626710STejun Heo EXPORT_SYMBOL_GPL(ib_wq); 62f0626710STejun Heo 630df91bb6SJason Gunthorpe /* 64921eab11SJason Gunthorpe * Each of the three rwsem locks (devices, clients, client_data) protects the 65921eab11SJason Gunthorpe * xarray of the same name. Specifically it allows the caller to assert that 66921eab11SJason Gunthorpe * the MARK will/will not be changing under the lock, and for devices and 67921eab11SJason Gunthorpe * clients, that the value in the xarray is still a valid pointer. Change of 68921eab11SJason Gunthorpe * the MARK is linked to the object state, so holding the lock and testing the 69921eab11SJason Gunthorpe * MARK also asserts that the contained object is in a certain state. 70921eab11SJason Gunthorpe * 71921eab11SJason Gunthorpe * This is used to build a two stage register/unregister flow where objects 72921eab11SJason Gunthorpe * can continue to be in the xarray even though they are still in progress to 73921eab11SJason Gunthorpe * register/unregister. 74921eab11SJason Gunthorpe * 75921eab11SJason Gunthorpe * The xarray itself provides additional locking, and restartable iteration, 76921eab11SJason Gunthorpe * which is also relied on. 77921eab11SJason Gunthorpe * 78921eab11SJason Gunthorpe * Locks should not be nested, with the exception of client_data, which is 79921eab11SJason Gunthorpe * allowed to nest under the read side of the other two locks. 80921eab11SJason Gunthorpe * 81921eab11SJason Gunthorpe * The devices_rwsem also protects the device name list, any change or 82921eab11SJason Gunthorpe * assignment of device name must also hold the write side to guarantee unique 83921eab11SJason Gunthorpe * names. 84921eab11SJason Gunthorpe */ 85921eab11SJason Gunthorpe 86921eab11SJason Gunthorpe /* 870df91bb6SJason Gunthorpe * devices contains devices that have had their names assigned. The 880df91bb6SJason Gunthorpe * devices may not be registered. Users that care about the registration 890df91bb6SJason Gunthorpe * status need to call ib_device_try_get() on the device to ensure it is 900df91bb6SJason Gunthorpe * registered, and keep it registered, for the required duration. 
910df91bb6SJason Gunthorpe * 920df91bb6SJason Gunthorpe */ 930df91bb6SJason Gunthorpe static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); 94921eab11SJason Gunthorpe static DECLARE_RWSEM(devices_rwsem); 950df91bb6SJason Gunthorpe #define DEVICE_REGISTERED XA_MARK_1 960df91bb6SJason Gunthorpe 971da177e4SLinus Torvalds static LIST_HEAD(client_list); 98e59178d8SJason Gunthorpe #define CLIENT_REGISTERED XA_MARK_1 99e59178d8SJason Gunthorpe static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); 100921eab11SJason Gunthorpe static DECLARE_RWSEM(clients_rwsem); 1011da177e4SLinus Torvalds 1021da177e4SLinus Torvalds /* 1030df91bb6SJason Gunthorpe * If client_data is registered then the corresponding client must also still 1040df91bb6SJason Gunthorpe * be registered. 1050df91bb6SJason Gunthorpe */ 1060df91bb6SJason Gunthorpe #define CLIENT_DATA_REGISTERED XA_MARK_1 1074e0f7b90SParav Pandit 1084e0f7b90SParav Pandit /** 1094e0f7b90SParav Pandit * struct rdma_dev_net - rdma net namespace metadata for a net 1104e0f7b90SParav Pandit * @net: Pointer to owner net namespace 1114e0f7b90SParav Pandit * @id: xarray id to identify the net namespace. 1124e0f7b90SParav Pandit */ 1134e0f7b90SParav Pandit struct rdma_dev_net { 1144e0f7b90SParav Pandit possible_net_t net; 1154e0f7b90SParav Pandit u32 id; 1164e0f7b90SParav Pandit }; 1174e0f7b90SParav Pandit 1184e0f7b90SParav Pandit static unsigned int rdma_dev_net_id; 1194e0f7b90SParav Pandit 1204e0f7b90SParav Pandit /* 1214e0f7b90SParav Pandit * A list of net namespaces is maintained in an xarray. This is necessary 1224e0f7b90SParav Pandit * because we can't get the locking right using the existing net ns list. We 1234e0f7b90SParav Pandit * would require an init_net callback after the list is updated. 1244e0f7b90SParav Pandit */ 1254e0f7b90SParav Pandit static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC); 1264e0f7b90SParav Pandit /* 1274e0f7b90SParav Pandit * rwsem to protect accessing the rdma_nets xarray entries. 1284e0f7b90SParav Pandit */ 1294e0f7b90SParav Pandit static DECLARE_RWSEM(rdma_nets_rwsem); 1304e0f7b90SParav Pandit 131cb7e0e13SParav Pandit bool ib_devices_shared_netns = true; 132a56bc45bSParav Pandit module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444); 133a56bc45bSParav Pandit MODULE_PARM_DESC(netns_mode, 134a56bc45bSParav Pandit "Share device among net namespaces; default=1 (shared)"); 13541c61401SParav Pandit /** 13641c61401SParav Pandit * rdma_dev_access_netns() - Return whether a rdma device can be accessed 13741c61401SParav Pandit * from a specified net namespace or not. 13841c61401SParav Pandit * @device: Pointer to rdma device which needs to be checked 13941c61401SParav Pandit * @net: Pointer to net namespace for which access is to be checked 14041c61401SParav Pandit * 14141c61401SParav Pandit * rdma_dev_access_netns() - Return whether a rdma device can be accessed 14241c61401SParav Pandit * from a specified net namespace or not. When 14341c61401SParav Pandit * rdma device is in shared mode, it ignores the 14441c61401SParav Pandit * net namespace. When rdma device is exclusive 14541c61401SParav Pandit * to a net namespace, rdma device net namespace is 14641c61401SParav Pandit * checked against the specified one.
14741c61401SParav Pandit */ 14841c61401SParav Pandit bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net) 14941c61401SParav Pandit { 15041c61401SParav Pandit return (ib_devices_shared_netns || 15141c61401SParav Pandit net_eq(read_pnet(&dev->coredev.rdma_net), net)); 15241c61401SParav Pandit } 15341c61401SParav Pandit EXPORT_SYMBOL(rdma_dev_access_netns); 15441c61401SParav Pandit 1550df91bb6SJason Gunthorpe /* 1560df91bb6SJason Gunthorpe * xarray has this behavior where it won't iterate over NULL values stored in 1570df91bb6SJason Gunthorpe * allocated arrays. So we need our own iterator to see all values stored in 1580df91bb6SJason Gunthorpe * the array. This does the same thing as xa_for_each except that it also 1590df91bb6SJason Gunthorpe * returns NULL valued entries if the array is allocating. Simplified to only 1600df91bb6SJason Gunthorpe * work on simple xarrays. 1610df91bb6SJason Gunthorpe */ 1620df91bb6SJason Gunthorpe static void *xan_find_marked(struct xarray *xa, unsigned long *indexp, 1630df91bb6SJason Gunthorpe xa_mark_t filter) 1640df91bb6SJason Gunthorpe { 1650df91bb6SJason Gunthorpe XA_STATE(xas, xa, *indexp); 1660df91bb6SJason Gunthorpe void *entry; 1670df91bb6SJason Gunthorpe 1680df91bb6SJason Gunthorpe rcu_read_lock(); 1690df91bb6SJason Gunthorpe do { 1700df91bb6SJason Gunthorpe entry = xas_find_marked(&xas, ULONG_MAX, filter); 1710df91bb6SJason Gunthorpe if (xa_is_zero(entry)) 1720df91bb6SJason Gunthorpe break; 1730df91bb6SJason Gunthorpe } while (xas_retry(&xas, entry)); 1740df91bb6SJason Gunthorpe rcu_read_unlock(); 1750df91bb6SJason Gunthorpe 1760df91bb6SJason Gunthorpe if (entry) { 1770df91bb6SJason Gunthorpe *indexp = xas.xa_index; 1780df91bb6SJason Gunthorpe if (xa_is_zero(entry)) 1790df91bb6SJason Gunthorpe return NULL; 1800df91bb6SJason Gunthorpe return entry; 1810df91bb6SJason Gunthorpe } 1820df91bb6SJason Gunthorpe return XA_ERROR(-ENOENT); 1830df91bb6SJason Gunthorpe } 1840df91bb6SJason Gunthorpe #define xan_for_each_marked(xa, index, entry, filter) \ 1850df91bb6SJason Gunthorpe for (index = 0, entry = xan_find_marked(xa, &(index), filter); \ 1860df91bb6SJason Gunthorpe !xa_is_err(entry); \ 1870df91bb6SJason Gunthorpe (index)++, entry = xan_find_marked(xa, &(index), filter)) 1880df91bb6SJason Gunthorpe 189324e227eSJason Gunthorpe /* RCU hash table mapping netdevice pointers to struct ib_port_data */ 190324e227eSJason Gunthorpe static DEFINE_SPINLOCK(ndev_hash_lock); 191324e227eSJason Gunthorpe static DECLARE_HASHTABLE(ndev_hash, 5); 192324e227eSJason Gunthorpe 193c2261dd7SJason Gunthorpe static void free_netdevs(struct ib_device *ib_dev); 194d0899892SJason Gunthorpe static void ib_unregister_work(struct work_struct *work); 195d0899892SJason Gunthorpe static void __ib_unregister_device(struct ib_device *device); 1968f408ab6SDaniel Jurgens static int ib_security_change(struct notifier_block *nb, unsigned long event, 1978f408ab6SDaniel Jurgens void *lsm_data); 1988f408ab6SDaniel Jurgens static void ib_policy_change_task(struct work_struct *work); 1998f408ab6SDaniel Jurgens static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); 2008f408ab6SDaniel Jurgens 201923abb9dSGal Pressman static void __ibdev_printk(const char *level, const struct ib_device *ibdev, 202923abb9dSGal Pressman struct va_format *vaf) 203923abb9dSGal Pressman { 204923abb9dSGal Pressman if (ibdev && ibdev->dev.parent) 205923abb9dSGal Pressman dev_printk_emit(level[1] - '0', 206923abb9dSGal Pressman ibdev->dev.parent, 207923abb9dSGal Pressman "%s %s %s: %pV", 
208923abb9dSGal Pressman dev_driver_string(ibdev->dev.parent), 209923abb9dSGal Pressman dev_name(ibdev->dev.parent), 210923abb9dSGal Pressman dev_name(&ibdev->dev), 211923abb9dSGal Pressman vaf); 212923abb9dSGal Pressman else if (ibdev) 213923abb9dSGal Pressman printk("%s%s: %pV", 214923abb9dSGal Pressman level, dev_name(&ibdev->dev), vaf); 215923abb9dSGal Pressman else 216923abb9dSGal Pressman printk("%s(NULL ib_device): %pV", level, vaf); 217923abb9dSGal Pressman } 218923abb9dSGal Pressman 219923abb9dSGal Pressman void ibdev_printk(const char *level, const struct ib_device *ibdev, 220923abb9dSGal Pressman const char *format, ...) 221923abb9dSGal Pressman { 222923abb9dSGal Pressman struct va_format vaf; 223923abb9dSGal Pressman va_list args; 224923abb9dSGal Pressman 225923abb9dSGal Pressman va_start(args, format); 226923abb9dSGal Pressman 227923abb9dSGal Pressman vaf.fmt = format; 228923abb9dSGal Pressman vaf.va = &args; 229923abb9dSGal Pressman 230923abb9dSGal Pressman __ibdev_printk(level, ibdev, &vaf); 231923abb9dSGal Pressman 232923abb9dSGal Pressman va_end(args); 233923abb9dSGal Pressman } 234923abb9dSGal Pressman EXPORT_SYMBOL(ibdev_printk); 235923abb9dSGal Pressman 236923abb9dSGal Pressman #define define_ibdev_printk_level(func, level) \ 237923abb9dSGal Pressman void func(const struct ib_device *ibdev, const char *fmt, ...) \ 238923abb9dSGal Pressman { \ 239923abb9dSGal Pressman struct va_format vaf; \ 240923abb9dSGal Pressman va_list args; \ 241923abb9dSGal Pressman \ 242923abb9dSGal Pressman va_start(args, fmt); \ 243923abb9dSGal Pressman \ 244923abb9dSGal Pressman vaf.fmt = fmt; \ 245923abb9dSGal Pressman vaf.va = &args; \ 246923abb9dSGal Pressman \ 247923abb9dSGal Pressman __ibdev_printk(level, ibdev, &vaf); \ 248923abb9dSGal Pressman \ 249923abb9dSGal Pressman va_end(args); \ 250923abb9dSGal Pressman } \ 251923abb9dSGal Pressman EXPORT_SYMBOL(func); 252923abb9dSGal Pressman 253923abb9dSGal Pressman define_ibdev_printk_level(ibdev_emerg, KERN_EMERG); 254923abb9dSGal Pressman define_ibdev_printk_level(ibdev_alert, KERN_ALERT); 255923abb9dSGal Pressman define_ibdev_printk_level(ibdev_crit, KERN_CRIT); 256923abb9dSGal Pressman define_ibdev_printk_level(ibdev_err, KERN_ERR); 257923abb9dSGal Pressman define_ibdev_printk_level(ibdev_warn, KERN_WARNING); 258923abb9dSGal Pressman define_ibdev_printk_level(ibdev_notice, KERN_NOTICE); 259923abb9dSGal Pressman define_ibdev_printk_level(ibdev_info, KERN_INFO); 260923abb9dSGal Pressman 2618f408ab6SDaniel Jurgens static struct notifier_block ibdev_lsm_nb = { 2628f408ab6SDaniel Jurgens .notifier_call = ib_security_change, 2638f408ab6SDaniel Jurgens }; 2641da177e4SLinus Torvalds 265decbc7a6SParav Pandit static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, 266decbc7a6SParav Pandit struct net *net); 267decbc7a6SParav Pandit 268324e227eSJason Gunthorpe /* Pointer to the RCU head at the start of the ib_port_data array */ 269324e227eSJason Gunthorpe struct ib_port_data_rcu { 270324e227eSJason Gunthorpe struct rcu_head rcu_head; 271324e227eSJason Gunthorpe struct ib_port_data pdata[]; 272324e227eSJason Gunthorpe }; 273324e227eSJason Gunthorpe 274deee3c7eSKamal Heib static void ib_device_check_mandatory(struct ib_device *device) 2751da177e4SLinus Torvalds { 2763023a1e9SKamal Heib #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x } 2771da177e4SLinus Torvalds static const struct { 2781da177e4SLinus Torvalds size_t offset; 2791da177e4SLinus Torvalds char *name; 2801da177e4SLinus Torvalds } mandatory_table[] 
= { 2811da177e4SLinus Torvalds IB_MANDATORY_FUNC(query_device), 2821da177e4SLinus Torvalds IB_MANDATORY_FUNC(query_port), 2831da177e4SLinus Torvalds IB_MANDATORY_FUNC(query_pkey), 2841da177e4SLinus Torvalds IB_MANDATORY_FUNC(alloc_pd), 2851da177e4SLinus Torvalds IB_MANDATORY_FUNC(dealloc_pd), 2861da177e4SLinus Torvalds IB_MANDATORY_FUNC(create_qp), 2871da177e4SLinus Torvalds IB_MANDATORY_FUNC(modify_qp), 2881da177e4SLinus Torvalds IB_MANDATORY_FUNC(destroy_qp), 2891da177e4SLinus Torvalds IB_MANDATORY_FUNC(post_send), 2901da177e4SLinus Torvalds IB_MANDATORY_FUNC(post_recv), 2911da177e4SLinus Torvalds IB_MANDATORY_FUNC(create_cq), 2921da177e4SLinus Torvalds IB_MANDATORY_FUNC(destroy_cq), 2931da177e4SLinus Torvalds IB_MANDATORY_FUNC(poll_cq), 2941da177e4SLinus Torvalds IB_MANDATORY_FUNC(req_notify_cq), 2951da177e4SLinus Torvalds IB_MANDATORY_FUNC(get_dma_mr), 2967738613eSIra Weiny IB_MANDATORY_FUNC(dereg_mr), 2977738613eSIra Weiny IB_MANDATORY_FUNC(get_port_immutable) 2981da177e4SLinus Torvalds }; 2991da177e4SLinus Torvalds int i; 3001da177e4SLinus Torvalds 3016780c4faSGal Pressman device->kverbs_provider = true; 3029a6b090cSAhmed S. Darwish for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { 3033023a1e9SKamal Heib if (!*(void **) ((void *) &device->ops + 3043023a1e9SKamal Heib mandatory_table[i].offset)) { 3056780c4faSGal Pressman device->kverbs_provider = false; 3066780c4faSGal Pressman break; 3071da177e4SLinus Torvalds } 3081da177e4SLinus Torvalds } 3091da177e4SLinus Torvalds } 3101da177e4SLinus Torvalds 311f8978bd9SLeon Romanovsky /* 31201b67117SParav Pandit * Caller must perform ib_device_put() to return the device reference count 31301b67117SParav Pandit * when ib_device_get_by_index() returns valid device pointer. 314f8978bd9SLeon Romanovsky */ 31537eeab55SParav Pandit struct ib_device *ib_device_get_by_index(const struct net *net, u32 index) 316f8978bd9SLeon Romanovsky { 317f8978bd9SLeon Romanovsky struct ib_device *device; 318f8978bd9SLeon Romanovsky 319921eab11SJason Gunthorpe down_read(&devices_rwsem); 3200df91bb6SJason Gunthorpe device = xa_load(&devices, index); 32101b67117SParav Pandit if (device) { 32237eeab55SParav Pandit if (!rdma_dev_access_netns(device, net)) { 32337eeab55SParav Pandit device = NULL; 32437eeab55SParav Pandit goto out; 32537eeab55SParav Pandit } 32637eeab55SParav Pandit 327d79af724SJason Gunthorpe if (!ib_device_try_get(device)) 32801b67117SParav Pandit device = NULL; 32901b67117SParav Pandit } 33037eeab55SParav Pandit out: 331921eab11SJason Gunthorpe up_read(&devices_rwsem); 332f8978bd9SLeon Romanovsky return device; 333f8978bd9SLeon Romanovsky } 334f8978bd9SLeon Romanovsky 335d79af724SJason Gunthorpe /** 336d79af724SJason Gunthorpe * ib_device_put - Release IB device reference 337d79af724SJason Gunthorpe * @device: device whose reference to be released 338d79af724SJason Gunthorpe * 339d79af724SJason Gunthorpe * ib_device_put() releases reference to the IB device to allow it to be 340d79af724SJason Gunthorpe * unregistered and eventually free. 
341d79af724SJason Gunthorpe */ 34201b67117SParav Pandit void ib_device_put(struct ib_device *device) 34301b67117SParav Pandit { 34401b67117SParav Pandit if (refcount_dec_and_test(&device->refcount)) 34501b67117SParav Pandit complete(&device->unreg_completion); 34601b67117SParav Pandit } 347d79af724SJason Gunthorpe EXPORT_SYMBOL(ib_device_put); 34801b67117SParav Pandit 3491da177e4SLinus Torvalds static struct ib_device *__ib_device_get_by_name(const char *name) 3501da177e4SLinus Torvalds { 3511da177e4SLinus Torvalds struct ib_device *device; 3520df91bb6SJason Gunthorpe unsigned long index; 3531da177e4SLinus Torvalds 3540df91bb6SJason Gunthorpe xa_for_each (&devices, index, device) 355896de009SJason Gunthorpe if (!strcmp(name, dev_name(&device->dev))) 3561da177e4SLinus Torvalds return device; 3571da177e4SLinus Torvalds 3581da177e4SLinus Torvalds return NULL; 3591da177e4SLinus Torvalds } 3601da177e4SLinus Torvalds 3616cc2c8e5SJason Gunthorpe /** 3626cc2c8e5SJason Gunthorpe * ib_device_get_by_name - Find an IB device by name 3636cc2c8e5SJason Gunthorpe * @name: The name to look for 3646cc2c8e5SJason Gunthorpe * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) 3656cc2c8e5SJason Gunthorpe * 3666cc2c8e5SJason Gunthorpe * Find and hold an ib_device by its name. The caller must call 3676cc2c8e5SJason Gunthorpe * ib_device_put() on the returned pointer. 3686cc2c8e5SJason Gunthorpe */ 3696cc2c8e5SJason Gunthorpe struct ib_device *ib_device_get_by_name(const char *name, 3706cc2c8e5SJason Gunthorpe enum rdma_driver_id driver_id) 3716cc2c8e5SJason Gunthorpe { 3726cc2c8e5SJason Gunthorpe struct ib_device *device; 3736cc2c8e5SJason Gunthorpe 3746cc2c8e5SJason Gunthorpe down_read(&devices_rwsem); 3756cc2c8e5SJason Gunthorpe device = __ib_device_get_by_name(name); 3766cc2c8e5SJason Gunthorpe if (device && driver_id != RDMA_DRIVER_UNKNOWN && 377b9560a41SJason Gunthorpe device->ops.driver_id != driver_id) 3786cc2c8e5SJason Gunthorpe device = NULL; 3796cc2c8e5SJason Gunthorpe 3806cc2c8e5SJason Gunthorpe if (device) { 3816cc2c8e5SJason Gunthorpe if (!ib_device_try_get(device)) 3826cc2c8e5SJason Gunthorpe device = NULL; 3836cc2c8e5SJason Gunthorpe } 3846cc2c8e5SJason Gunthorpe up_read(&devices_rwsem); 3856cc2c8e5SJason Gunthorpe return device; 3866cc2c8e5SJason Gunthorpe } 3876cc2c8e5SJason Gunthorpe EXPORT_SYMBOL(ib_device_get_by_name); 3886cc2c8e5SJason Gunthorpe 3894e0f7b90SParav Pandit static int rename_compat_devs(struct ib_device *device) 3904e0f7b90SParav Pandit { 3914e0f7b90SParav Pandit struct ib_core_device *cdev; 3924e0f7b90SParav Pandit unsigned long index; 3934e0f7b90SParav Pandit int ret = 0; 3944e0f7b90SParav Pandit 3954e0f7b90SParav Pandit mutex_lock(&device->compat_devs_mutex); 3964e0f7b90SParav Pandit xa_for_each (&device->compat_devs, index, cdev) { 3974e0f7b90SParav Pandit ret = device_rename(&cdev->dev, dev_name(&device->dev)); 3984e0f7b90SParav Pandit if (ret) { 3994e0f7b90SParav Pandit dev_warn(&cdev->dev, 4004e0f7b90SParav Pandit "Fail to rename compatdev to new name %s\n", 4014e0f7b90SParav Pandit dev_name(&device->dev)); 4024e0f7b90SParav Pandit break; 4034e0f7b90SParav Pandit } 4044e0f7b90SParav Pandit } 4054e0f7b90SParav Pandit mutex_unlock(&device->compat_devs_mutex); 4064e0f7b90SParav Pandit return ret; 4074e0f7b90SParav Pandit } 4084e0f7b90SParav Pandit 409d21943ddSLeon Romanovsky int ib_device_rename(struct ib_device *ibdev, const char *name) 410d21943ddSLeon Romanovsky { 411dc1435c0SLeon Romanovsky unsigned long index; 412dc1435c0SLeon Romanovsky void 
*client_data; 413e3593b56SJason Gunthorpe int ret; 414d21943ddSLeon Romanovsky 415921eab11SJason Gunthorpe down_write(&devices_rwsem); 416e3593b56SJason Gunthorpe if (!strcmp(name, dev_name(&ibdev->dev))) { 417dc1435c0SLeon Romanovsky up_write(&devices_rwsem); 418dc1435c0SLeon Romanovsky return 0; 419e3593b56SJason Gunthorpe } 420e3593b56SJason Gunthorpe 421344684e6SJason Gunthorpe if (__ib_device_get_by_name(name)) { 422dc1435c0SLeon Romanovsky up_write(&devices_rwsem); 423dc1435c0SLeon Romanovsky return -EEXIST; 424d21943ddSLeon Romanovsky } 425d21943ddSLeon Romanovsky 426d21943ddSLeon Romanovsky ret = device_rename(&ibdev->dev, name); 427dc1435c0SLeon Romanovsky if (ret) { 428921eab11SJason Gunthorpe up_write(&devices_rwsem); 429d21943ddSLeon Romanovsky return ret; 430d21943ddSLeon Romanovsky } 431d21943ddSLeon Romanovsky 432dc1435c0SLeon Romanovsky strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX); 433dc1435c0SLeon Romanovsky ret = rename_compat_devs(ibdev); 434dc1435c0SLeon Romanovsky 435dc1435c0SLeon Romanovsky downgrade_write(&devices_rwsem); 436dc1435c0SLeon Romanovsky down_read(&ibdev->client_data_rwsem); 437dc1435c0SLeon Romanovsky xan_for_each_marked(&ibdev->client_data, index, client_data, 438dc1435c0SLeon Romanovsky CLIENT_DATA_REGISTERED) { 439dc1435c0SLeon Romanovsky struct ib_client *client = xa_load(&clients, index); 440dc1435c0SLeon Romanovsky 441dc1435c0SLeon Romanovsky if (!client || !client->rename) 442dc1435c0SLeon Romanovsky continue; 443dc1435c0SLeon Romanovsky 444dc1435c0SLeon Romanovsky client->rename(ibdev, client_data); 445dc1435c0SLeon Romanovsky } 446dc1435c0SLeon Romanovsky up_read(&ibdev->client_data_rwsem); 447dc1435c0SLeon Romanovsky up_read(&devices_rwsem); 448dc1435c0SLeon Romanovsky return 0; 449dc1435c0SLeon Romanovsky } 450dc1435c0SLeon Romanovsky 451f8fc8cd9SYamin Friedman int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim) 452f8fc8cd9SYamin Friedman { 453f8fc8cd9SYamin Friedman if (use_dim > 1) 454f8fc8cd9SYamin Friedman return -EINVAL; 455f8fc8cd9SYamin Friedman ibdev->use_cq_dim = use_dim; 456f8fc8cd9SYamin Friedman 457f8fc8cd9SYamin Friedman return 0; 458f8fc8cd9SYamin Friedman } 459f8fc8cd9SYamin Friedman 460e349f858SJason Gunthorpe static int alloc_name(struct ib_device *ibdev, const char *name) 4611da177e4SLinus Torvalds { 4621da177e4SLinus Torvalds struct ib_device *device; 4630df91bb6SJason Gunthorpe unsigned long index; 4643b88afd3SJason Gunthorpe struct ida inuse; 4653b88afd3SJason Gunthorpe int rc; 4661da177e4SLinus Torvalds int i; 4671da177e4SLinus Torvalds 468921eab11SJason Gunthorpe lockdep_assert_held_exclusive(&devices_rwsem); 4693b88afd3SJason Gunthorpe ida_init(&inuse); 4700df91bb6SJason Gunthorpe xa_for_each (&devices, index, device) { 471e349f858SJason Gunthorpe char buf[IB_DEVICE_NAME_MAX]; 472e349f858SJason Gunthorpe 473896de009SJason Gunthorpe if (sscanf(dev_name(&device->dev), name, &i) != 1) 4741da177e4SLinus Torvalds continue; 4753b88afd3SJason Gunthorpe if (i < 0 || i >= INT_MAX) 4761da177e4SLinus Torvalds continue; 4771da177e4SLinus Torvalds snprintf(buf, sizeof buf, name, i); 4783b88afd3SJason Gunthorpe if (strcmp(buf, dev_name(&device->dev)) != 0) 4793b88afd3SJason Gunthorpe continue; 4803b88afd3SJason Gunthorpe 4813b88afd3SJason Gunthorpe rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL); 4823b88afd3SJason Gunthorpe if (rc < 0) 4833b88afd3SJason Gunthorpe goto out; 4841da177e4SLinus Torvalds } 4851da177e4SLinus Torvalds 4863b88afd3SJason Gunthorpe rc = ida_alloc(&inuse, GFP_KERNEL); 4873b88afd3SJason Gunthorpe if (rc 
< 0) 4883b88afd3SJason Gunthorpe goto out; 4891da177e4SLinus Torvalds 4903b88afd3SJason Gunthorpe rc = dev_set_name(&ibdev->dev, name, rc); 4913b88afd3SJason Gunthorpe out: 4923b88afd3SJason Gunthorpe ida_destroy(&inuse); 4933b88afd3SJason Gunthorpe return rc; 4941da177e4SLinus Torvalds } 4951da177e4SLinus Torvalds 49655aeed06SJason Gunthorpe static void ib_device_release(struct device *device) 49755aeed06SJason Gunthorpe { 49855aeed06SJason Gunthorpe struct ib_device *dev = container_of(device, struct ib_device, dev); 49955aeed06SJason Gunthorpe 500c2261dd7SJason Gunthorpe free_netdevs(dev); 501652432f3SJason Gunthorpe WARN_ON(refcount_read(&dev->refcount)); 50246bdf370SKamal Heib if (dev->port_data) { 50303db3a2dSMatan Barak ib_cache_release_one(dev); 504b34b269aSJason Gunthorpe ib_security_release_port_pkey_list(dev); 505413d3347SMark Zhang rdma_counter_release(dev); 506324e227eSJason Gunthorpe kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu, 507324e227eSJason Gunthorpe pdata[0]), 508324e227eSJason Gunthorpe rcu_head); 50946bdf370SKamal Heib } 510413d3347SMark Zhang 51146bdf370SKamal Heib xa_destroy(&dev->compat_devs); 51246bdf370SKamal Heib xa_destroy(&dev->client_data); 513324e227eSJason Gunthorpe kfree_rcu(dev, rcu_head); 51455aeed06SJason Gunthorpe } 51555aeed06SJason Gunthorpe 51655aeed06SJason Gunthorpe static int ib_device_uevent(struct device *device, 51755aeed06SJason Gunthorpe struct kobj_uevent_env *env) 51855aeed06SJason Gunthorpe { 519896de009SJason Gunthorpe if (add_uevent_var(env, "NAME=%s", dev_name(device))) 52055aeed06SJason Gunthorpe return -ENOMEM; 52155aeed06SJason Gunthorpe 52255aeed06SJason Gunthorpe /* 52355aeed06SJason Gunthorpe * It would be nice to pass the node GUID with the event... 52455aeed06SJason Gunthorpe */ 52555aeed06SJason Gunthorpe 52655aeed06SJason Gunthorpe return 0; 52755aeed06SJason Gunthorpe } 52855aeed06SJason Gunthorpe 52962dfa795SParav Pandit static const void *net_namespace(struct device *d) 53062dfa795SParav Pandit { 5314e0f7b90SParav Pandit struct ib_core_device *coredev = 5324e0f7b90SParav Pandit container_of(d, struct ib_core_device, dev); 5334e0f7b90SParav Pandit 5344e0f7b90SParav Pandit return read_pnet(&coredev->rdma_net); 53562dfa795SParav Pandit } 53662dfa795SParav Pandit 53755aeed06SJason Gunthorpe static struct class ib_class = { 53855aeed06SJason Gunthorpe .name = "infiniband", 53955aeed06SJason Gunthorpe .dev_release = ib_device_release, 54055aeed06SJason Gunthorpe .dev_uevent = ib_device_uevent, 54162dfa795SParav Pandit .ns_type = &net_ns_type_operations, 54262dfa795SParav Pandit .namespace = net_namespace, 54355aeed06SJason Gunthorpe }; 54455aeed06SJason Gunthorpe 545cebe556bSParav Pandit static void rdma_init_coredev(struct ib_core_device *coredev, 5464e0f7b90SParav Pandit struct ib_device *dev, struct net *net) 547cebe556bSParav Pandit { 548cebe556bSParav Pandit /* This BUILD_BUG_ON is intended to catch layout change 549cebe556bSParav Pandit * of union of ib_core_device and device. 550cebe556bSParav Pandit * dev must be the first element as ib_core and providers 551cebe556bSParav Pandit * driver uses it. Adding anything in ib_core_device before 552cebe556bSParav Pandit * device will break this assumption. 
553cebe556bSParav Pandit */ 554cebe556bSParav Pandit BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) != 555cebe556bSParav Pandit offsetof(struct ib_device, dev)); 556cebe556bSParav Pandit 557cebe556bSParav Pandit coredev->dev.class = &ib_class; 558cebe556bSParav Pandit coredev->dev.groups = dev->groups; 559cebe556bSParav Pandit device_initialize(&coredev->dev); 560cebe556bSParav Pandit coredev->owner = dev; 561cebe556bSParav Pandit INIT_LIST_HEAD(&coredev->port_list); 5624e0f7b90SParav Pandit write_pnet(&coredev->rdma_net, net); 563cebe556bSParav Pandit } 564cebe556bSParav Pandit 5651da177e4SLinus Torvalds /** 566459cc69fSLeon Romanovsky * _ib_alloc_device - allocate an IB device struct 5671da177e4SLinus Torvalds * @size:size of structure to allocate 5681da177e4SLinus Torvalds * 5691da177e4SLinus Torvalds * Low-level drivers should use ib_alloc_device() to allocate &struct 5701da177e4SLinus Torvalds * ib_device. @size is the size of the structure to be allocated, 5711da177e4SLinus Torvalds * including any private data used by the low-level driver. 5721da177e4SLinus Torvalds * ib_dealloc_device() must be used to free structures allocated with 5731da177e4SLinus Torvalds * ib_alloc_device(). 5741da177e4SLinus Torvalds */ 575459cc69fSLeon Romanovsky struct ib_device *_ib_alloc_device(size_t size) 5761da177e4SLinus Torvalds { 57755aeed06SJason Gunthorpe struct ib_device *device; 5781da177e4SLinus Torvalds 57955aeed06SJason Gunthorpe if (WARN_ON(size < sizeof(struct ib_device))) 58055aeed06SJason Gunthorpe return NULL; 58155aeed06SJason Gunthorpe 58255aeed06SJason Gunthorpe device = kzalloc(size, GFP_KERNEL); 58355aeed06SJason Gunthorpe if (!device) 58455aeed06SJason Gunthorpe return NULL; 58555aeed06SJason Gunthorpe 58641eda65cSLeon Romanovsky if (rdma_restrack_init(device)) { 58741eda65cSLeon Romanovsky kfree(device); 58841eda65cSLeon Romanovsky return NULL; 58941eda65cSLeon Romanovsky } 59002d8883fSLeon Romanovsky 5915f8f5499SParav Pandit device->groups[0] = &ib_dev_attr_group; 5924e0f7b90SParav Pandit rdma_init_coredev(&device->coredev, device, &init_net); 59355aeed06SJason Gunthorpe 59455aeed06SJason Gunthorpe INIT_LIST_HEAD(&device->event_handler_list); 59555aeed06SJason Gunthorpe spin_lock_init(&device->event_handler_lock); 596d0899892SJason Gunthorpe mutex_init(&device->unregistration_lock); 5970df91bb6SJason Gunthorpe /* 5980df91bb6SJason Gunthorpe * client_data needs to be alloc because we don't want our mark to be 5990df91bb6SJason Gunthorpe * destroyed if the user stores NULL in the client data. 6000df91bb6SJason Gunthorpe */ 6010df91bb6SJason Gunthorpe xa_init_flags(&device->client_data, XA_FLAGS_ALLOC); 602921eab11SJason Gunthorpe init_rwsem(&device->client_data_rwsem); 6034e0f7b90SParav Pandit xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC); 6044e0f7b90SParav Pandit mutex_init(&device->compat_devs_mutex); 60501b67117SParav Pandit init_completion(&device->unreg_completion); 606d0899892SJason Gunthorpe INIT_WORK(&device->unregistration_work, ib_unregister_work); 60755aeed06SJason Gunthorpe 60855aeed06SJason Gunthorpe return device; 6091da177e4SLinus Torvalds } 610459cc69fSLeon Romanovsky EXPORT_SYMBOL(_ib_alloc_device); 6111da177e4SLinus Torvalds 6121da177e4SLinus Torvalds /** 6131da177e4SLinus Torvalds * ib_dealloc_device - free an IB device struct 6141da177e4SLinus Torvalds * @device:structure to free 6151da177e4SLinus Torvalds * 6161da177e4SLinus Torvalds * Free a structure allocated with ib_alloc_device(). 
6171da177e4SLinus Torvalds */ 6181da177e4SLinus Torvalds void ib_dealloc_device(struct ib_device *device) 6191da177e4SLinus Torvalds { 620d0899892SJason Gunthorpe if (device->ops.dealloc_driver) 621d0899892SJason Gunthorpe device->ops.dealloc_driver(device); 622d0899892SJason Gunthorpe 623d0899892SJason Gunthorpe /* 624d0899892SJason Gunthorpe * ib_unregister_driver() requires all devices to remain in the xarray 625d0899892SJason Gunthorpe * while their ops are callable. The last op we call is dealloc_driver 626d0899892SJason Gunthorpe * above. This is needed to create a fence on op callbacks prior to 627d0899892SJason Gunthorpe * allowing the driver module to unload. 628d0899892SJason Gunthorpe */ 629d0899892SJason Gunthorpe down_write(&devices_rwsem); 630d0899892SJason Gunthorpe if (xa_load(&devices, device->index) == device) 631d0899892SJason Gunthorpe xa_erase(&devices, device->index); 632d0899892SJason Gunthorpe up_write(&devices_rwsem); 633d0899892SJason Gunthorpe 634c2261dd7SJason Gunthorpe /* Expedite releasing netdev references */ 635c2261dd7SJason Gunthorpe free_netdevs(device); 636c2261dd7SJason Gunthorpe 6374e0f7b90SParav Pandit WARN_ON(!xa_empty(&device->compat_devs)); 6380df91bb6SJason Gunthorpe WARN_ON(!xa_empty(&device->client_data)); 639652432f3SJason Gunthorpe WARN_ON(refcount_read(&device->refcount)); 6400ad699c0SLeon Romanovsky rdma_restrack_clean(device); 641e155755eSParav Pandit /* Balances with device_initialize */ 642924b8900SLeon Romanovsky put_device(&device->dev); 6431da177e4SLinus Torvalds } 6441da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dealloc_device); 6451da177e4SLinus Torvalds 646921eab11SJason Gunthorpe /* 647921eab11SJason Gunthorpe * add_client_context() and remove_client_context() must be safe against 648921eab11SJason Gunthorpe * parallel calls on the same device - registration/unregistration of both the 649921eab11SJason Gunthorpe * device and client can be occurring in parallel. 650921eab11SJason Gunthorpe * 651921eab11SJason Gunthorpe * The routines need to be a fence, any caller must not return until the add 652921eab11SJason Gunthorpe * or remove is fully completed. 653921eab11SJason Gunthorpe */ 654921eab11SJason Gunthorpe static int add_client_context(struct ib_device *device, 655921eab11SJason Gunthorpe struct ib_client *client) 6561da177e4SLinus Torvalds { 657921eab11SJason Gunthorpe int ret = 0; 6581da177e4SLinus Torvalds 6596780c4faSGal Pressman if (!device->kverbs_provider && !client->no_kverbs_req) 660921eab11SJason Gunthorpe return 0; 6616780c4faSGal Pressman 662921eab11SJason Gunthorpe down_write(&device->client_data_rwsem); 663921eab11SJason Gunthorpe /* 664921eab11SJason Gunthorpe * Another caller to add_client_context got here first and has already 665921eab11SJason Gunthorpe * completely initialized context. 
666921eab11SJason Gunthorpe */ 667921eab11SJason Gunthorpe if (xa_get_mark(&device->client_data, client->client_id, 668921eab11SJason Gunthorpe CLIENT_DATA_REGISTERED)) 669921eab11SJason Gunthorpe goto out; 670921eab11SJason Gunthorpe 671921eab11SJason Gunthorpe ret = xa_err(xa_store(&device->client_data, client->client_id, NULL, 672921eab11SJason Gunthorpe GFP_KERNEL)); 673921eab11SJason Gunthorpe if (ret) 674921eab11SJason Gunthorpe goto out; 675921eab11SJason Gunthorpe downgrade_write(&device->client_data_rwsem); 676921eab11SJason Gunthorpe if (client->add) 677921eab11SJason Gunthorpe client->add(device); 678921eab11SJason Gunthorpe 679921eab11SJason Gunthorpe /* Readers shall not see a client until add has been completed */ 6800df91bb6SJason Gunthorpe xa_set_mark(&device->client_data, client->client_id, 6810df91bb6SJason Gunthorpe CLIENT_DATA_REGISTERED); 682921eab11SJason Gunthorpe up_read(&device->client_data_rwsem); 683921eab11SJason Gunthorpe return 0; 6841da177e4SLinus Torvalds 685921eab11SJason Gunthorpe out: 686921eab11SJason Gunthorpe up_write(&device->client_data_rwsem); 687921eab11SJason Gunthorpe return ret; 688921eab11SJason Gunthorpe } 689921eab11SJason Gunthorpe 690921eab11SJason Gunthorpe static void remove_client_context(struct ib_device *device, 691921eab11SJason Gunthorpe unsigned int client_id) 692921eab11SJason Gunthorpe { 693921eab11SJason Gunthorpe struct ib_client *client; 694921eab11SJason Gunthorpe void *client_data; 695921eab11SJason Gunthorpe 696921eab11SJason Gunthorpe down_write(&device->client_data_rwsem); 697921eab11SJason Gunthorpe if (!xa_get_mark(&device->client_data, client_id, 698921eab11SJason Gunthorpe CLIENT_DATA_REGISTERED)) { 699921eab11SJason Gunthorpe up_write(&device->client_data_rwsem); 700921eab11SJason Gunthorpe return; 701921eab11SJason Gunthorpe } 702921eab11SJason Gunthorpe client_data = xa_load(&device->client_data, client_id); 703921eab11SJason Gunthorpe xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); 704921eab11SJason Gunthorpe client = xa_load(&clients, client_id); 705921eab11SJason Gunthorpe downgrade_write(&device->client_data_rwsem); 706921eab11SJason Gunthorpe 707921eab11SJason Gunthorpe /* 708921eab11SJason Gunthorpe * Notice we cannot be holding any exclusive locks when calling the 709921eab11SJason Gunthorpe * remove callback as the remove callback can recurse back into any 710921eab11SJason Gunthorpe * public functions in this module and thus try for any locks those 711921eab11SJason Gunthorpe * functions take. 712921eab11SJason Gunthorpe * 713921eab11SJason Gunthorpe * For this reason clients and drivers should not call the 714921eab11SJason Gunthorpe * unregistration functions while holding any locks. 715921eab11SJason Gunthorpe * 716921eab11SJason Gunthorpe * It is tempting to drop the client_data_rwsem too, but this is required 717921eab11SJason Gunthorpe * to ensure that unregister_client does not return until all clients 718921eab11SJason Gunthorpe * are completely unregistered, which is required to avoid module 719921eab11SJason Gunthorpe * unloading races.
720921eab11SJason Gunthorpe */ 721921eab11SJason Gunthorpe if (client->remove) 722921eab11SJason Gunthorpe client->remove(device, client_data); 723921eab11SJason Gunthorpe 724921eab11SJason Gunthorpe xa_erase(&device->client_data, client_id); 725921eab11SJason Gunthorpe up_read(&device->client_data_rwsem); 7261da177e4SLinus Torvalds } 7271da177e4SLinus Torvalds 728c2261dd7SJason Gunthorpe static int alloc_port_data(struct ib_device *device) 7295eb620c8SYosef Etigin { 730324e227eSJason Gunthorpe struct ib_port_data_rcu *pdata_rcu; 731ea1075edSJason Gunthorpe unsigned int port; 732c2261dd7SJason Gunthorpe 733c2261dd7SJason Gunthorpe if (device->port_data) 734c2261dd7SJason Gunthorpe return 0; 735c2261dd7SJason Gunthorpe 736c2261dd7SJason Gunthorpe /* This can only be called once the physical port range is defined */ 737c2261dd7SJason Gunthorpe if (WARN_ON(!device->phys_port_cnt)) 738c2261dd7SJason Gunthorpe return -EINVAL; 7395eb620c8SYosef Etigin 7408ceb1357SJason Gunthorpe /* 7418ceb1357SJason Gunthorpe * device->port_data is indexed directly by the port number to make 7427738613eSIra Weiny * access to this data as efficient as possible. 7437738613eSIra Weiny * 7448ceb1357SJason Gunthorpe * Therefore port_data is declared as a 1 based array with potential 7458ceb1357SJason Gunthorpe * empty slots at the beginning. 7467738613eSIra Weiny */ 747324e227eSJason Gunthorpe pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata, 748324e227eSJason Gunthorpe rdma_end_port(device) + 1), 749324e227eSJason Gunthorpe GFP_KERNEL); 750324e227eSJason Gunthorpe if (!pdata_rcu) 75155aeed06SJason Gunthorpe return -ENOMEM; 752324e227eSJason Gunthorpe /* 753324e227eSJason Gunthorpe * The rcu_head is put in front of the port data array and the stored 754324e227eSJason Gunthorpe * pointer is adjusted since we never need to see that member until 755324e227eSJason Gunthorpe * kfree_rcu. 
756324e227eSJason Gunthorpe */ 757324e227eSJason Gunthorpe device->port_data = pdata_rcu->pdata; 7585eb620c8SYosef Etigin 759ea1075edSJason Gunthorpe rdma_for_each_port (device, port) { 7608ceb1357SJason Gunthorpe struct ib_port_data *pdata = &device->port_data[port]; 7618ceb1357SJason Gunthorpe 762324e227eSJason Gunthorpe pdata->ib_dev = device; 7638ceb1357SJason Gunthorpe spin_lock_init(&pdata->pkey_list_lock); 7648ceb1357SJason Gunthorpe INIT_LIST_HEAD(&pdata->pkey_list); 765c2261dd7SJason Gunthorpe spin_lock_init(&pdata->netdev_lock); 766324e227eSJason Gunthorpe INIT_HLIST_NODE(&pdata->ndev_hash_link); 767c2261dd7SJason Gunthorpe } 768c2261dd7SJason Gunthorpe return 0; 769c2261dd7SJason Gunthorpe } 770c2261dd7SJason Gunthorpe 771c2261dd7SJason Gunthorpe static int verify_immutable(const struct ib_device *dev, u8 port) 772c2261dd7SJason Gunthorpe { 773c2261dd7SJason Gunthorpe return WARN_ON(!rdma_cap_ib_mad(dev, port) && 774c2261dd7SJason Gunthorpe rdma_max_mad_size(dev, port) != 0); 775c2261dd7SJason Gunthorpe } 776c2261dd7SJason Gunthorpe 777c2261dd7SJason Gunthorpe static int setup_port_data(struct ib_device *device) 778c2261dd7SJason Gunthorpe { 779c2261dd7SJason Gunthorpe unsigned int port; 780c2261dd7SJason Gunthorpe int ret; 781c2261dd7SJason Gunthorpe 782c2261dd7SJason Gunthorpe ret = alloc_port_data(device); 783c2261dd7SJason Gunthorpe if (ret) 784c2261dd7SJason Gunthorpe return ret; 785c2261dd7SJason Gunthorpe 786c2261dd7SJason Gunthorpe rdma_for_each_port (device, port) { 787c2261dd7SJason Gunthorpe struct ib_port_data *pdata = &device->port_data[port]; 7888ceb1357SJason Gunthorpe 7898ceb1357SJason Gunthorpe ret = device->ops.get_port_immutable(device, port, 7908ceb1357SJason Gunthorpe &pdata->immutable); 7915eb620c8SYosef Etigin if (ret) 7925eb620c8SYosef Etigin return ret; 79355aeed06SJason Gunthorpe 79455aeed06SJason Gunthorpe if (verify_immutable(device, port)) 79555aeed06SJason Gunthorpe return -EINVAL; 79655aeed06SJason Gunthorpe } 79755aeed06SJason Gunthorpe return 0; 7985eb620c8SYosef Etigin } 7995eb620c8SYosef Etigin 8009abb0d1bSLeon Romanovsky void ib_get_device_fw_str(struct ib_device *dev, char *str) 8015fa76c20SIra Weiny { 8023023a1e9SKamal Heib if (dev->ops.get_dev_fw_str) 8033023a1e9SKamal Heib dev->ops.get_dev_fw_str(dev, str); 8045fa76c20SIra Weiny else 8055fa76c20SIra Weiny str[0] = '\0'; 8065fa76c20SIra Weiny } 8075fa76c20SIra Weiny EXPORT_SYMBOL(ib_get_device_fw_str); 8085fa76c20SIra Weiny 8098f408ab6SDaniel Jurgens static void ib_policy_change_task(struct work_struct *work) 8108f408ab6SDaniel Jurgens { 8118f408ab6SDaniel Jurgens struct ib_device *dev; 8120df91bb6SJason Gunthorpe unsigned long index; 8138f408ab6SDaniel Jurgens 814921eab11SJason Gunthorpe down_read(&devices_rwsem); 8150df91bb6SJason Gunthorpe xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 816ea1075edSJason Gunthorpe unsigned int i; 8178f408ab6SDaniel Jurgens 818ea1075edSJason Gunthorpe rdma_for_each_port (dev, i) { 8198f408ab6SDaniel Jurgens u64 sp; 8208f408ab6SDaniel Jurgens int ret = ib_get_cached_subnet_prefix(dev, 8218f408ab6SDaniel Jurgens i, 8228f408ab6SDaniel Jurgens &sp); 8238f408ab6SDaniel Jurgens 8248f408ab6SDaniel Jurgens WARN_ONCE(ret, 8258f408ab6SDaniel Jurgens "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", 8268f408ab6SDaniel Jurgens ret); 827a750cfdeSDaniel Jurgens if (!ret) 8288f408ab6SDaniel Jurgens ib_security_cache_change(dev, i, sp); 8298f408ab6SDaniel Jurgens } 8308f408ab6SDaniel Jurgens } 831921eab11SJason Gunthorpe 
up_read(&devices_rwsem); 8328f408ab6SDaniel Jurgens } 8338f408ab6SDaniel Jurgens 8348f408ab6SDaniel Jurgens static int ib_security_change(struct notifier_block *nb, unsigned long event, 8358f408ab6SDaniel Jurgens void *lsm_data) 8368f408ab6SDaniel Jurgens { 8378f408ab6SDaniel Jurgens if (event != LSM_POLICY_CHANGE) 8388f408ab6SDaniel Jurgens return NOTIFY_DONE; 8398f408ab6SDaniel Jurgens 8408f408ab6SDaniel Jurgens schedule_work(&ib_policy_change_work); 841c66f6741SDaniel Jurgens ib_mad_agent_security_change(); 8428f408ab6SDaniel Jurgens 8438f408ab6SDaniel Jurgens return NOTIFY_OK; 8448f408ab6SDaniel Jurgens } 8458f408ab6SDaniel Jurgens 8464e0f7b90SParav Pandit static void compatdev_release(struct device *dev) 8474e0f7b90SParav Pandit { 8484e0f7b90SParav Pandit struct ib_core_device *cdev = 8494e0f7b90SParav Pandit container_of(dev, struct ib_core_device, dev); 8504e0f7b90SParav Pandit 8514e0f7b90SParav Pandit kfree(cdev); 8524e0f7b90SParav Pandit } 8534e0f7b90SParav Pandit 8544e0f7b90SParav Pandit static int add_one_compat_dev(struct ib_device *device, 8554e0f7b90SParav Pandit struct rdma_dev_net *rnet) 8564e0f7b90SParav Pandit { 8574e0f7b90SParav Pandit struct ib_core_device *cdev; 8584e0f7b90SParav Pandit int ret; 8594e0f7b90SParav Pandit 8602b34c558SParav Pandit lockdep_assert_held(&rdma_nets_rwsem); 861a56bc45bSParav Pandit if (!ib_devices_shared_netns) 862a56bc45bSParav Pandit return 0; 863a56bc45bSParav Pandit 8644e0f7b90SParav Pandit /* 8654e0f7b90SParav Pandit * Create and add compat device in all namespaces other than where it 8664e0f7b90SParav Pandit * is currently bound to. 8674e0f7b90SParav Pandit */ 8684e0f7b90SParav Pandit if (net_eq(read_pnet(&rnet->net), 8694e0f7b90SParav Pandit read_pnet(&device->coredev.rdma_net))) 8704e0f7b90SParav Pandit return 0; 8714e0f7b90SParav Pandit 8724e0f7b90SParav Pandit /* 8734e0f7b90SParav Pandit * The first of init_net() or ib_register_device() to take the 8744e0f7b90SParav Pandit * compat_devs_mutex wins and gets to add the device. Others will wait 8754e0f7b90SParav Pandit * for completion here. 
8764e0f7b90SParav Pandit */ 8774e0f7b90SParav Pandit mutex_lock(&device->compat_devs_mutex); 8784e0f7b90SParav Pandit cdev = xa_load(&device->compat_devs, rnet->id); 8794e0f7b90SParav Pandit if (cdev) { 8804e0f7b90SParav Pandit ret = 0; 8814e0f7b90SParav Pandit goto done; 8824e0f7b90SParav Pandit } 8834e0f7b90SParav Pandit ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); 8844e0f7b90SParav Pandit if (ret) 8854e0f7b90SParav Pandit goto done; 8864e0f7b90SParav Pandit 8874e0f7b90SParav Pandit cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 8884e0f7b90SParav Pandit if (!cdev) { 8894e0f7b90SParav Pandit ret = -ENOMEM; 8904e0f7b90SParav Pandit goto cdev_err; 8914e0f7b90SParav Pandit } 8924e0f7b90SParav Pandit 8934e0f7b90SParav Pandit cdev->dev.parent = device->dev.parent; 8944e0f7b90SParav Pandit rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); 8954e0f7b90SParav Pandit cdev->dev.release = compatdev_release; 8964e0f7b90SParav Pandit dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); 8974e0f7b90SParav Pandit 8984e0f7b90SParav Pandit ret = device_add(&cdev->dev); 8994e0f7b90SParav Pandit if (ret) 9004e0f7b90SParav Pandit goto add_err; 901eb15c78bSParav Pandit ret = ib_setup_port_attrs(cdev); 9025417783eSParav Pandit if (ret) 9035417783eSParav Pandit goto port_err; 9044e0f7b90SParav Pandit 9054e0f7b90SParav Pandit ret = xa_err(xa_store(&device->compat_devs, rnet->id, 9064e0f7b90SParav Pandit cdev, GFP_KERNEL)); 9074e0f7b90SParav Pandit if (ret) 9084e0f7b90SParav Pandit goto insert_err; 9094e0f7b90SParav Pandit 9104e0f7b90SParav Pandit mutex_unlock(&device->compat_devs_mutex); 9114e0f7b90SParav Pandit return 0; 9124e0f7b90SParav Pandit 9134e0f7b90SParav Pandit insert_err: 9145417783eSParav Pandit ib_free_port_attrs(cdev); 9155417783eSParav Pandit port_err: 9164e0f7b90SParav Pandit device_del(&cdev->dev); 9174e0f7b90SParav Pandit add_err: 9184e0f7b90SParav Pandit put_device(&cdev->dev); 9194e0f7b90SParav Pandit cdev_err: 9204e0f7b90SParav Pandit xa_release(&device->compat_devs, rnet->id); 9214e0f7b90SParav Pandit done: 9224e0f7b90SParav Pandit mutex_unlock(&device->compat_devs_mutex); 9234e0f7b90SParav Pandit return ret; 9244e0f7b90SParav Pandit } 9254e0f7b90SParav Pandit 9264e0f7b90SParav Pandit static void remove_one_compat_dev(struct ib_device *device, u32 id) 9274e0f7b90SParav Pandit { 9284e0f7b90SParav Pandit struct ib_core_device *cdev; 9294e0f7b90SParav Pandit 9304e0f7b90SParav Pandit mutex_lock(&device->compat_devs_mutex); 9314e0f7b90SParav Pandit cdev = xa_erase(&device->compat_devs, id); 9324e0f7b90SParav Pandit mutex_unlock(&device->compat_devs_mutex); 9334e0f7b90SParav Pandit if (cdev) { 9345417783eSParav Pandit ib_free_port_attrs(cdev); 9354e0f7b90SParav Pandit device_del(&cdev->dev); 9364e0f7b90SParav Pandit put_device(&cdev->dev); 9374e0f7b90SParav Pandit } 9384e0f7b90SParav Pandit } 9394e0f7b90SParav Pandit 9404e0f7b90SParav Pandit static void remove_compat_devs(struct ib_device *device) 9414e0f7b90SParav Pandit { 9424e0f7b90SParav Pandit struct ib_core_device *cdev; 9434e0f7b90SParav Pandit unsigned long index; 9444e0f7b90SParav Pandit 9454e0f7b90SParav Pandit xa_for_each (&device->compat_devs, index, cdev) 9464e0f7b90SParav Pandit remove_one_compat_dev(device, index); 9474e0f7b90SParav Pandit } 9484e0f7b90SParav Pandit 9494e0f7b90SParav Pandit static int add_compat_devs(struct ib_device *device) 9504e0f7b90SParav Pandit { 9514e0f7b90SParav Pandit struct rdma_dev_net *rnet; 9524e0f7b90SParav Pandit unsigned long index; 9534e0f7b90SParav Pandit int ret = 0; 
9544e0f7b90SParav Pandit 955decbc7a6SParav Pandit lockdep_assert_held(&devices_rwsem); 956decbc7a6SParav Pandit 9574e0f7b90SParav Pandit down_read(&rdma_nets_rwsem); 9584e0f7b90SParav Pandit xa_for_each (&rdma_nets, index, rnet) { 9594e0f7b90SParav Pandit ret = add_one_compat_dev(device, rnet); 9604e0f7b90SParav Pandit if (ret) 9614e0f7b90SParav Pandit break; 9624e0f7b90SParav Pandit } 9634e0f7b90SParav Pandit up_read(&rdma_nets_rwsem); 9644e0f7b90SParav Pandit return ret; 9654e0f7b90SParav Pandit } 9664e0f7b90SParav Pandit 9672b34c558SParav Pandit static void remove_all_compat_devs(void) 9682b34c558SParav Pandit { 9692b34c558SParav Pandit struct ib_compat_device *cdev; 9702b34c558SParav Pandit struct ib_device *dev; 9712b34c558SParav Pandit unsigned long index; 9722b34c558SParav Pandit 9732b34c558SParav Pandit down_read(&devices_rwsem); 9742b34c558SParav Pandit xa_for_each (&devices, index, dev) { 9752b34c558SParav Pandit unsigned long c_index = 0; 9762b34c558SParav Pandit 9772b34c558SParav Pandit /* Hold nets_rwsem so that any other thread modifying this 9782b34c558SParav Pandit * system param can sync with this thread. 9792b34c558SParav Pandit */ 9802b34c558SParav Pandit down_read(&rdma_nets_rwsem); 9812b34c558SParav Pandit xa_for_each (&dev->compat_devs, c_index, cdev) 9822b34c558SParav Pandit remove_one_compat_dev(dev, c_index); 9832b34c558SParav Pandit up_read(&rdma_nets_rwsem); 9842b34c558SParav Pandit } 9852b34c558SParav Pandit up_read(&devices_rwsem); 9862b34c558SParav Pandit } 9872b34c558SParav Pandit 9882b34c558SParav Pandit static int add_all_compat_devs(void) 9892b34c558SParav Pandit { 9902b34c558SParav Pandit struct rdma_dev_net *rnet; 9912b34c558SParav Pandit struct ib_device *dev; 9922b34c558SParav Pandit unsigned long index; 9932b34c558SParav Pandit int ret = 0; 9942b34c558SParav Pandit 9952b34c558SParav Pandit down_read(&devices_rwsem); 9962b34c558SParav Pandit xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 9972b34c558SParav Pandit unsigned long net_index = 0; 9982b34c558SParav Pandit 9992b34c558SParav Pandit /* Hold nets_rwsem so that any other thread modifying this 10002b34c558SParav Pandit * system param can sync with this thread. 10012b34c558SParav Pandit */ 10022b34c558SParav Pandit down_read(&rdma_nets_rwsem); 10032b34c558SParav Pandit xa_for_each (&rdma_nets, net_index, rnet) { 10042b34c558SParav Pandit ret = add_one_compat_dev(dev, rnet); 10052b34c558SParav Pandit if (ret) 10062b34c558SParav Pandit break; 10072b34c558SParav Pandit } 10082b34c558SParav Pandit up_read(&rdma_nets_rwsem); 10092b34c558SParav Pandit } 10102b34c558SParav Pandit up_read(&devices_rwsem); 10112b34c558SParav Pandit if (ret) 10122b34c558SParav Pandit remove_all_compat_devs(); 10132b34c558SParav Pandit return ret; 10142b34c558SParav Pandit } 10152b34c558SParav Pandit 10162b34c558SParav Pandit int rdma_compatdev_set(u8 enable) 10172b34c558SParav Pandit { 10182b34c558SParav Pandit struct rdma_dev_net *rnet; 10192b34c558SParav Pandit unsigned long index; 10202b34c558SParav Pandit int ret = 0; 10212b34c558SParav Pandit 10222b34c558SParav Pandit down_write(&rdma_nets_rwsem); 10232b34c558SParav Pandit if (ib_devices_shared_netns == enable) { 10242b34c558SParav Pandit up_write(&rdma_nets_rwsem); 10252b34c558SParav Pandit return 0; 10262b34c558SParav Pandit } 10272b34c558SParav Pandit 10282b34c558SParav Pandit /* enable/disable of compat devices is not supported 10292b34c558SParav Pandit * when more than default init_net exists. 
10302b34c558SParav Pandit */ 10312b34c558SParav Pandit xa_for_each (&rdma_nets, index, rnet) { 10322b34c558SParav Pandit ret++; 10332b34c558SParav Pandit break; 10342b34c558SParav Pandit } 10352b34c558SParav Pandit if (!ret) 10362b34c558SParav Pandit ib_devices_shared_netns = enable; 10372b34c558SParav Pandit up_write(&rdma_nets_rwsem); 10382b34c558SParav Pandit if (ret) 10392b34c558SParav Pandit return -EBUSY; 10402b34c558SParav Pandit 10412b34c558SParav Pandit if (enable) 10422b34c558SParav Pandit ret = add_all_compat_devs(); 10432b34c558SParav Pandit else 10442b34c558SParav Pandit remove_all_compat_devs(); 10452b34c558SParav Pandit return ret; 10462b34c558SParav Pandit } 10472b34c558SParav Pandit 10484e0f7b90SParav Pandit static void rdma_dev_exit_net(struct net *net) 10494e0f7b90SParav Pandit { 10504e0f7b90SParav Pandit struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); 10514e0f7b90SParav Pandit struct ib_device *dev; 10524e0f7b90SParav Pandit unsigned long index; 10534e0f7b90SParav Pandit int ret; 10544e0f7b90SParav Pandit 10554e0f7b90SParav Pandit down_write(&rdma_nets_rwsem); 10564e0f7b90SParav Pandit /* 10574e0f7b90SParav Pandit * Prevent the ID from being re-used and hide the id from xa_for_each. 10584e0f7b90SParav Pandit */ 10594e0f7b90SParav Pandit ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)); 10604e0f7b90SParav Pandit WARN_ON(ret); 10614e0f7b90SParav Pandit up_write(&rdma_nets_rwsem); 10624e0f7b90SParav Pandit 10634e0f7b90SParav Pandit down_read(&devices_rwsem); 10644e0f7b90SParav Pandit xa_for_each (&devices, index, dev) { 10654e0f7b90SParav Pandit get_device(&dev->dev); 10664e0f7b90SParav Pandit /* 10674e0f7b90SParav Pandit * Release the devices_rwsem so that the potentially blocking 10684e0f7b90SParav Pandit * device_del doesn't hold the devices_rwsem for too long. 10694e0f7b90SParav Pandit */ 10704e0f7b90SParav Pandit up_read(&devices_rwsem); 10714e0f7b90SParav Pandit 10724e0f7b90SParav Pandit remove_one_compat_dev(dev, rnet->id); 10734e0f7b90SParav Pandit 1074decbc7a6SParav Pandit /* 1075decbc7a6SParav Pandit * If the real device is in the NS then move it back to init. 1076decbc7a6SParav Pandit */ 1077decbc7a6SParav Pandit rdma_dev_change_netns(dev, net, &init_net); 1078decbc7a6SParav Pandit 10794e0f7b90SParav Pandit put_device(&dev->dev); 10804e0f7b90SParav Pandit down_read(&devices_rwsem); 10814e0f7b90SParav Pandit } 10824e0f7b90SParav Pandit up_read(&devices_rwsem); 10834e0f7b90SParav Pandit 10844e0f7b90SParav Pandit xa_erase(&rdma_nets, rnet->id); 10854e0f7b90SParav Pandit } 10864e0f7b90SParav Pandit 10874e0f7b90SParav Pandit static __net_init int rdma_dev_init_net(struct net *net) 10884e0f7b90SParav Pandit { 10894e0f7b90SParav Pandit struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); 10904e0f7b90SParav Pandit unsigned long index; 10914e0f7b90SParav Pandit struct ib_device *dev; 10924e0f7b90SParav Pandit int ret; 10934e0f7b90SParav Pandit 10944e0f7b90SParav Pandit /* No need to create any compat devices in default init_net.
*/ 10954e0f7b90SParav Pandit if (net_eq(net, &init_net)) 10964e0f7b90SParav Pandit return 0; 10974e0f7b90SParav Pandit 10984e0f7b90SParav Pandit write_pnet(&rnet->net, net); 10994e0f7b90SParav Pandit 11004e0f7b90SParav Pandit ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); 11014e0f7b90SParav Pandit if (ret) 11024e0f7b90SParav Pandit return ret; 11034e0f7b90SParav Pandit 11044e0f7b90SParav Pandit down_read(&devices_rwsem); 11054e0f7b90SParav Pandit xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 11062b34c558SParav Pandit /* Hold nets_rwsem so that netlink command cannot change 11072b34c558SParav Pandit * system configuration for device sharing mode. 11082b34c558SParav Pandit */ 11092b34c558SParav Pandit down_read(&rdma_nets_rwsem); 11104e0f7b90SParav Pandit ret = add_one_compat_dev(dev, rnet); 11112b34c558SParav Pandit up_read(&rdma_nets_rwsem); 11124e0f7b90SParav Pandit if (ret) 11134e0f7b90SParav Pandit break; 11144e0f7b90SParav Pandit } 11154e0f7b90SParav Pandit up_read(&devices_rwsem); 11164e0f7b90SParav Pandit 11174e0f7b90SParav Pandit if (ret) 11184e0f7b90SParav Pandit rdma_dev_exit_net(net); 11194e0f7b90SParav Pandit 11204e0f7b90SParav Pandit return ret; 11214e0f7b90SParav Pandit } 11224e0f7b90SParav Pandit 1123ecc82c53SLeon Romanovsky /* 1124d0899892SJason Gunthorpe * Assign the unique string device name and the unique device index. This is 1125d0899892SJason Gunthorpe * undone by ib_dealloc_device. 1126ecc82c53SLeon Romanovsky */ 11270df91bb6SJason Gunthorpe static int assign_name(struct ib_device *device, const char *name) 11280df91bb6SJason Gunthorpe { 11290df91bb6SJason Gunthorpe static u32 last_id; 11300df91bb6SJason Gunthorpe int ret; 1131ecc82c53SLeon Romanovsky 1132921eab11SJason Gunthorpe down_write(&devices_rwsem); 11330df91bb6SJason Gunthorpe /* Assign a unique name to the device */ 11340df91bb6SJason Gunthorpe if (strchr(name, '%')) 11350df91bb6SJason Gunthorpe ret = alloc_name(device, name); 11360df91bb6SJason Gunthorpe else 11370df91bb6SJason Gunthorpe ret = dev_set_name(&device->dev, name); 11380df91bb6SJason Gunthorpe if (ret) 11390df91bb6SJason Gunthorpe goto out; 1140ecc82c53SLeon Romanovsky 11410df91bb6SJason Gunthorpe if (__ib_device_get_by_name(dev_name(&device->dev))) { 11420df91bb6SJason Gunthorpe ret = -ENFILE; 11430df91bb6SJason Gunthorpe goto out; 1144ecc82c53SLeon Romanovsky } 11450df91bb6SJason Gunthorpe strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX); 11460df91bb6SJason Gunthorpe 1147ea295481SLinus Torvalds ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b, 1148ea295481SLinus Torvalds &last_id, GFP_KERNEL); 1149ea295481SLinus Torvalds if (ret > 0) 11500df91bb6SJason Gunthorpe ret = 0; 1151921eab11SJason Gunthorpe 11520df91bb6SJason Gunthorpe out: 1153921eab11SJason Gunthorpe up_write(&devices_rwsem); 11540df91bb6SJason Gunthorpe return ret; 11550df91bb6SJason Gunthorpe } 11560df91bb6SJason Gunthorpe 1157548cb4fbSParav Pandit static void setup_dma_device(struct ib_device *device) 11581da177e4SLinus Torvalds { 115999db9494SBart Van Assche struct device *parent = device->dev.parent; 11601da177e4SLinus Torvalds 11610957c29fSBart Van Assche WARN_ON_ONCE(device->dma_device); 11620957c29fSBart Van Assche if (device->dev.dma_ops) { 11630957c29fSBart Van Assche /* 11640957c29fSBart Van Assche * The caller provided custom DMA operations. Copy the 11650957c29fSBart Van Assche * DMA-related fields that are used by e.g. dma_alloc_coherent() 11660957c29fSBart Van Assche * into device->dev. 
11670957c29fSBart Van Assche */ 11680957c29fSBart Van Assche device->dma_device = &device->dev; 116902ee9da3SBart Van Assche if (!device->dev.dma_mask) { 117002ee9da3SBart Van Assche if (parent) 117199db9494SBart Van Assche device->dev.dma_mask = parent->dma_mask; 117202ee9da3SBart Van Assche else 117302ee9da3SBart Van Assche WARN_ON_ONCE(true); 117402ee9da3SBart Van Assche } 117502ee9da3SBart Van Assche if (!device->dev.coherent_dma_mask) { 117602ee9da3SBart Van Assche if (parent) 11770957c29fSBart Van Assche device->dev.coherent_dma_mask = 11780957c29fSBart Van Assche parent->coherent_dma_mask; 117902ee9da3SBart Van Assche else 118002ee9da3SBart Van Assche WARN_ON_ONCE(true); 118102ee9da3SBart Van Assche } 11820957c29fSBart Van Assche } else { 11830957c29fSBart Van Assche /* 11840957c29fSBart Van Assche * The caller did not provide custom DMA operations. Use the 11850957c29fSBart Van Assche * DMA mapping operations of the parent device. 11860957c29fSBart Van Assche */ 118702ee9da3SBart Van Assche WARN_ON_ONCE(!parent); 11880957c29fSBart Van Assche device->dma_device = parent; 11890957c29fSBart Van Assche } 1190d10bcf94SShiraz Saleem /* Setup default max segment size for all IB devices */ 1191d10bcf94SShiraz Saleem dma_set_max_seg_size(device->dma_device, SZ_2G); 1192d10bcf94SShiraz Saleem 1193548cb4fbSParav Pandit } 1194548cb4fbSParav Pandit 1195921eab11SJason Gunthorpe /* 1196921eab11SJason Gunthorpe * setup_device() allocates memory and sets up data that requires calling the 1197921eab11SJason Gunthorpe * device ops, this is the only reason these actions are not done during 1198921eab11SJason Gunthorpe * ib_alloc_device. It is undone by ib_dealloc_device(). 1199921eab11SJason Gunthorpe */ 1200548cb4fbSParav Pandit static int setup_device(struct ib_device *device) 1201548cb4fbSParav Pandit { 1202548cb4fbSParav Pandit struct ib_udata uhw = {.outlen = 0, .inlen = 0}; 1203548cb4fbSParav Pandit int ret; 1204548cb4fbSParav Pandit 1205921eab11SJason Gunthorpe setup_dma_device(device); 1206deee3c7eSKamal Heib ib_device_check_mandatory(device); 1207548cb4fbSParav Pandit 12088ceb1357SJason Gunthorpe ret = setup_port_data(device); 1209548cb4fbSParav Pandit if (ret) { 12108ceb1357SJason Gunthorpe dev_warn(&device->dev, "Couldn't create per-port data\n"); 1211548cb4fbSParav Pandit return ret; 1212548cb4fbSParav Pandit } 1213548cb4fbSParav Pandit 1214548cb4fbSParav Pandit memset(&device->attrs, 0, sizeof(device->attrs)); 12153023a1e9SKamal Heib ret = device->ops.query_device(device, &device->attrs, &uhw); 1216548cb4fbSParav Pandit if (ret) { 1217548cb4fbSParav Pandit dev_warn(&device->dev, 1218548cb4fbSParav Pandit "Couldn't query the device attributes\n"); 1219d45f89d5SJason Gunthorpe return ret; 1220548cb4fbSParav Pandit } 1221548cb4fbSParav Pandit 1222548cb4fbSParav Pandit return 0; 1223548cb4fbSParav Pandit } 1224548cb4fbSParav Pandit 1225921eab11SJason Gunthorpe static void disable_device(struct ib_device *device) 1226921eab11SJason Gunthorpe { 1227921eab11SJason Gunthorpe struct ib_client *client; 1228921eab11SJason Gunthorpe 1229921eab11SJason Gunthorpe WARN_ON(!refcount_read(&device->refcount)); 1230921eab11SJason Gunthorpe 1231921eab11SJason Gunthorpe down_write(&devices_rwsem); 1232921eab11SJason Gunthorpe xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); 1233921eab11SJason Gunthorpe up_write(&devices_rwsem); 1234921eab11SJason Gunthorpe 1235921eab11SJason Gunthorpe down_read(&clients_rwsem); 1236921eab11SJason Gunthorpe list_for_each_entry_reverse(client, &client_list, list) 
1237921eab11SJason Gunthorpe remove_client_context(device, client->client_id); 1238921eab11SJason Gunthorpe up_read(&clients_rwsem); 1239921eab11SJason Gunthorpe 1240921eab11SJason Gunthorpe /* Pairs with refcount_set in enable_device */ 1241921eab11SJason Gunthorpe ib_device_put(device); 1242921eab11SJason Gunthorpe wait_for_completion(&device->unreg_completion); 1243c2261dd7SJason Gunthorpe 12444e0f7b90SParav Pandit /* 12454e0f7b90SParav Pandit * compat devices must be removed after device refcount drops to zero. 12464e0f7b90SParav Pandit * Otherwise init_net() may add more compatdevs after removing compat 12474e0f7b90SParav Pandit * devices and before device is disabled. 12484e0f7b90SParav Pandit */ 12494e0f7b90SParav Pandit remove_compat_devs(device); 1250921eab11SJason Gunthorpe } 1251921eab11SJason Gunthorpe 1252921eab11SJason Gunthorpe /* 1253921eab11SJason Gunthorpe * An enabled device is visible to all clients and to all the public facing 1254d0899892SJason Gunthorpe * APIs that return a device pointer. This always returns with a new get, even 1255d0899892SJason Gunthorpe * if it fails. 1256921eab11SJason Gunthorpe */ 1257d0899892SJason Gunthorpe static int enable_device_and_get(struct ib_device *device) 1258921eab11SJason Gunthorpe { 1259921eab11SJason Gunthorpe struct ib_client *client; 1260921eab11SJason Gunthorpe unsigned long index; 1261d0899892SJason Gunthorpe int ret = 0; 1262921eab11SJason Gunthorpe 1263d0899892SJason Gunthorpe /* 1264d0899892SJason Gunthorpe * One ref belongs to the xa and the other belongs to this 1265d0899892SJason Gunthorpe * thread. This is needed to guard against parallel unregistration. 1266d0899892SJason Gunthorpe */ 1267d0899892SJason Gunthorpe refcount_set(&device->refcount, 2); 1268921eab11SJason Gunthorpe down_write(&devices_rwsem); 1269921eab11SJason Gunthorpe xa_set_mark(&devices, device->index, DEVICE_REGISTERED); 1270d0899892SJason Gunthorpe 1271d0899892SJason Gunthorpe /* 1272d0899892SJason Gunthorpe * By using downgrade_write() we ensure that no other thread can clear 1273d0899892SJason Gunthorpe * DEVICE_REGISTERED while we are completing the client setup. 1274d0899892SJason Gunthorpe */ 1275d0899892SJason Gunthorpe downgrade_write(&devices_rwsem); 1276921eab11SJason Gunthorpe 1277ca22354bSJason Gunthorpe if (device->ops.enable_driver) { 1278ca22354bSJason Gunthorpe ret = device->ops.enable_driver(device); 1279ca22354bSJason Gunthorpe if (ret) 1280ca22354bSJason Gunthorpe goto out; 1281ca22354bSJason Gunthorpe } 1282ca22354bSJason Gunthorpe 1283921eab11SJason Gunthorpe down_read(&clients_rwsem); 1284921eab11SJason Gunthorpe xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { 1285921eab11SJason Gunthorpe ret = add_client_context(device, client); 1286d0899892SJason Gunthorpe if (ret) 1287d0899892SJason Gunthorpe break; 1288d0899892SJason Gunthorpe } 1289921eab11SJason Gunthorpe up_read(&clients_rwsem); 12904e0f7b90SParav Pandit if (!ret) 12914e0f7b90SParav Pandit ret = add_compat_devs(device); 1292ca22354bSJason Gunthorpe out: 1293d0899892SJason Gunthorpe up_read(&devices_rwsem); 1294921eab11SJason Gunthorpe return ret; 1295921eab11SJason Gunthorpe } 1296921eab11SJason Gunthorpe 1297548cb4fbSParav Pandit /** 1298548cb4fbSParav Pandit * ib_register_device - Register an IB device with IB core 1299548cb4fbSParav Pandit * @device:Device to register 1300548cb4fbSParav Pandit * 1301548cb4fbSParav Pandit * Low-level drivers use ib_register_device() to register their 1302548cb4fbSParav Pandit * devices with the IB core. 
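 *
 * A minimal caller-side sketch, for illustration only (the my_drv_dev
 * structure, my_drv_ops and the "my_drv%d" name template are
 * hypothetical, not part of this API):
 *
 *      struct my_drv_dev {
 *              struct ib_device ibdev;
 *      };
 *
 *      struct my_drv_dev *drv;
 *      int rc;
 *
 *      drv = ib_alloc_device(my_drv_dev, ibdev);
 *      if (!drv)
 *              return -ENOMEM;
 *      ib_set_device_ops(&drv->ibdev, &my_drv_ops);
 *      rc = ib_register_device(&drv->ibdev, "my_drv%d");
 *      if (rc)
 *              ib_dealloc_device(&drv->ibdev);
 *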
All registered clients will receive a 1303548cb4fbSParav Pandit * callback for each device that is added. @device must be allocated 1304548cb4fbSParav Pandit * with ib_alloc_device(). 1305d0899892SJason Gunthorpe * 1306d0899892SJason Gunthorpe * If the driver uses ops.dealloc_driver and calls any ib_unregister_device() 1307d0899892SJason Gunthorpe * asynchronously then the device pointer may become freed as soon as this 1308d0899892SJason Gunthorpe * function returns. 1309548cb4fbSParav Pandit */ 1310ea4baf7fSParav Pandit int ib_register_device(struct ib_device *device, const char *name) 1311548cb4fbSParav Pandit { 1312548cb4fbSParav Pandit int ret; 13131da177e4SLinus Torvalds 13140df91bb6SJason Gunthorpe ret = assign_name(device, name); 1315e349f858SJason Gunthorpe if (ret) 1316921eab11SJason Gunthorpe return ret; 13171da177e4SLinus Torvalds 1318548cb4fbSParav Pandit ret = setup_device(device); 1319548cb4fbSParav Pandit if (ret) 1320d0899892SJason Gunthorpe return ret; 132103db3a2dSMatan Barak 1322d45f89d5SJason Gunthorpe ret = ib_cache_setup_one(device); 1323d45f89d5SJason Gunthorpe if (ret) { 1324d45f89d5SJason Gunthorpe dev_warn(&device->dev, 1325d45f89d5SJason Gunthorpe "Couldn't set up InfiniBand P_Key/GID cache\n"); 1326d0899892SJason Gunthorpe return ret; 1327d45f89d5SJason Gunthorpe } 1328d45f89d5SJason Gunthorpe 13297527a7b1SParav Pandit ib_device_register_rdmacg(device); 13303e153a93SIra Weiny 1331413d3347SMark Zhang rdma_counter_init(device); 1332413d3347SMark Zhang 1333e7a5b4aaSLeon Romanovsky /* 1334e7a5b4aaSLeon Romanovsky * Ensure that ADD uevent is not fired because it 1335e7a5b4aaSLeon Romanovsky * is too early amd device is not initialized yet. 1336e7a5b4aaSLeon Romanovsky */ 1337e7a5b4aaSLeon Romanovsky dev_set_uevent_suppress(&device->dev, true); 13385f8f5499SParav Pandit ret = device_add(&device->dev); 13395f8f5499SParav Pandit if (ret) 13405f8f5499SParav Pandit goto cg_cleanup; 13415f8f5499SParav Pandit 1342ea4baf7fSParav Pandit ret = ib_device_register_sysfs(device); 13431da177e4SLinus Torvalds if (ret) { 134443c7c851SJason Gunthorpe dev_warn(&device->dev, 134543c7c851SJason Gunthorpe "Couldn't register device with driver model\n"); 13465f8f5499SParav Pandit goto dev_cleanup; 13471da177e4SLinus Torvalds } 13481da177e4SLinus Torvalds 1349d0899892SJason Gunthorpe ret = enable_device_and_get(device); 1350e7a5b4aaSLeon Romanovsky dev_set_uevent_suppress(&device->dev, false); 1351e7a5b4aaSLeon Romanovsky /* Mark for userspace that device is ready */ 1352e7a5b4aaSLeon Romanovsky kobject_uevent(&device->dev.kobj, KOBJ_ADD); 1353d0899892SJason Gunthorpe if (ret) { 1354d0899892SJason Gunthorpe void (*dealloc_fn)(struct ib_device *); 1355d0899892SJason Gunthorpe 1356d0899892SJason Gunthorpe /* 1357d0899892SJason Gunthorpe * If we hit this error flow then we don't want to 1358d0899892SJason Gunthorpe * automatically dealloc the device since the caller is 1359d0899892SJason Gunthorpe * expected to call ib_dealloc_device() after 1360d0899892SJason Gunthorpe * ib_register_device() fails. This is tricky due to the 1361d0899892SJason Gunthorpe * possibility for a parallel unregistration along with this 1362d0899892SJason Gunthorpe * error flow. Since we have a refcount here we know any 1363d0899892SJason Gunthorpe * parallel flow is stopped in disable_device and will see the 1364d0899892SJason Gunthorpe * NULL pointers, causing the responsibility to 1365d0899892SJason Gunthorpe * ib_dealloc_device() to revert back to this thread. 
1366d0899892SJason Gunthorpe */ 1367d0899892SJason Gunthorpe dealloc_fn = device->ops.dealloc_driver; 1368d0899892SJason Gunthorpe device->ops.dealloc_driver = NULL; 1369d0899892SJason Gunthorpe ib_device_put(device); 1370d0899892SJason Gunthorpe __ib_unregister_device(device); 1371d0899892SJason Gunthorpe device->ops.dealloc_driver = dealloc_fn; 1372d0899892SJason Gunthorpe return ret; 1373d0899892SJason Gunthorpe } 1374d0899892SJason Gunthorpe ib_device_put(device); 13751da177e4SLinus Torvalds 13764be3a4faSParav Pandit return 0; 13774be3a4faSParav Pandit 13785f8f5499SParav Pandit dev_cleanup: 13795f8f5499SParav Pandit device_del(&device->dev); 13802fb4f4eaSParav Pandit cg_cleanup: 1381e7a5b4aaSLeon Romanovsky dev_set_uevent_suppress(&device->dev, false); 13822fb4f4eaSParav Pandit ib_device_unregister_rdmacg(device); 1383d45f89d5SJason Gunthorpe ib_cache_cleanup_one(device); 13841da177e4SLinus Torvalds return ret; 13851da177e4SLinus Torvalds } 13861da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_device); 13871da177e4SLinus Torvalds 1388d0899892SJason Gunthorpe /* Callers must hold a get on the device. */ 1389d0899892SJason Gunthorpe static void __ib_unregister_device(struct ib_device *ib_dev) 1390d0899892SJason Gunthorpe { 1391d0899892SJason Gunthorpe /* 1392d0899892SJason Gunthorpe * We have a registration lock so that all the calls to unregister are 1393d0899892SJason Gunthorpe * fully fenced, once any unregister returns the device is truely 1394d0899892SJason Gunthorpe * unregistered even if multiple callers are unregistering it at the 1395d0899892SJason Gunthorpe * same time. This also interacts with the registration flow and 1396d0899892SJason Gunthorpe * provides sane semantics if register and unregister are racing. 1397d0899892SJason Gunthorpe */ 1398d0899892SJason Gunthorpe mutex_lock(&ib_dev->unregistration_lock); 1399d0899892SJason Gunthorpe if (!refcount_read(&ib_dev->refcount)) 1400d0899892SJason Gunthorpe goto out; 1401d0899892SJason Gunthorpe 1402d0899892SJason Gunthorpe disable_device(ib_dev); 14033042492bSParav Pandit 14043042492bSParav Pandit /* Expedite removing unregistered pointers from the hash table */ 14053042492bSParav Pandit free_netdevs(ib_dev); 14063042492bSParav Pandit 1407d0899892SJason Gunthorpe ib_device_unregister_sysfs(ib_dev); 1408d0899892SJason Gunthorpe device_del(&ib_dev->dev); 1409d0899892SJason Gunthorpe ib_device_unregister_rdmacg(ib_dev); 1410d0899892SJason Gunthorpe ib_cache_cleanup_one(ib_dev); 1411d0899892SJason Gunthorpe 1412d0899892SJason Gunthorpe /* 1413d0899892SJason Gunthorpe * Drivers using the new flow may not call ib_dealloc_device except 1414d0899892SJason Gunthorpe * in error unwind prior to registration success. 1415d0899892SJason Gunthorpe */ 1416d0899892SJason Gunthorpe if (ib_dev->ops.dealloc_driver) { 1417d0899892SJason Gunthorpe WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); 1418d0899892SJason Gunthorpe ib_dealloc_device(ib_dev); 1419d0899892SJason Gunthorpe } 1420d0899892SJason Gunthorpe out: 1421d0899892SJason Gunthorpe mutex_unlock(&ib_dev->unregistration_lock); 1422d0899892SJason Gunthorpe } 1423d0899892SJason Gunthorpe 14241da177e4SLinus Torvalds /** 14251da177e4SLinus Torvalds * ib_unregister_device - Unregister an IB device 1426d0899892SJason Gunthorpe * @device: The device to unregister 14271da177e4SLinus Torvalds * 14281da177e4SLinus Torvalds * Unregister an IB device. All clients will receive a remove callback. 
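 *
 * A hedged caller-side sketch (the my_drv names are hypothetical and
 * assume a driver that does not set ops.dealloc_driver, so it still
 * owns the final ib_dealloc_device()):
 *
 *      static void my_drv_remove(struct pci_dev *pdev)
 *      {
 *              struct my_drv_dev *drv = pci_get_drvdata(pdev);
 *
 *              ib_unregister_device(&drv->ibdev);
 *              ib_dealloc_device(&drv->ibdev);
 *      }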
1429d0899892SJason Gunthorpe * 1430d0899892SJason Gunthorpe * Callers should call this routine only once, and protect against races with 1431d0899892SJason Gunthorpe * registration. Typically it should only be called as part of a remove 1432d0899892SJason Gunthorpe * callback in an implementation of driver core's struct device_driver and 1433d0899892SJason Gunthorpe * related. 1434d0899892SJason Gunthorpe * 1435d0899892SJason Gunthorpe * If ops.dealloc_driver is used then ib_dev will be freed upon return from 1436d0899892SJason Gunthorpe * this function. 14371da177e4SLinus Torvalds */ 1438d0899892SJason Gunthorpe void ib_unregister_device(struct ib_device *ib_dev) 14391da177e4SLinus Torvalds { 1440d0899892SJason Gunthorpe get_device(&ib_dev->dev); 1441d0899892SJason Gunthorpe __ib_unregister_device(ib_dev); 1442d0899892SJason Gunthorpe put_device(&ib_dev->dev); 14431da177e4SLinus Torvalds } 14441da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_device); 14451da177e4SLinus Torvalds 1446d0899892SJason Gunthorpe /** 1447d0899892SJason Gunthorpe * ib_unregister_device_and_put - Unregister a device while holding a 'get' 1448d0899892SJason Gunthorpe * device: The device to unregister 1449d0899892SJason Gunthorpe * 1450d0899892SJason Gunthorpe * This is the same as ib_unregister_device(), except it includes an internal 1451d0899892SJason Gunthorpe * ib_device_put() that should match a 'get' obtained by the caller. 1452d0899892SJason Gunthorpe * 1453d0899892SJason Gunthorpe * It is safe to call this routine concurrently from multiple threads while 1454d0899892SJason Gunthorpe * holding the 'get'. When the function returns the device is fully 1455d0899892SJason Gunthorpe * unregistered. 1456d0899892SJason Gunthorpe * 1457d0899892SJason Gunthorpe * Drivers using this flow MUST use the driver_unregister callback to clean up 1458d0899892SJason Gunthorpe * their resources associated with the device and dealloc it. 1459d0899892SJason Gunthorpe */ 1460d0899892SJason Gunthorpe void ib_unregister_device_and_put(struct ib_device *ib_dev) 1461d0899892SJason Gunthorpe { 1462d0899892SJason Gunthorpe WARN_ON(!ib_dev->ops.dealloc_driver); 1463d0899892SJason Gunthorpe get_device(&ib_dev->dev); 1464d0899892SJason Gunthorpe ib_device_put(ib_dev); 1465d0899892SJason Gunthorpe __ib_unregister_device(ib_dev); 1466d0899892SJason Gunthorpe put_device(&ib_dev->dev); 1467d0899892SJason Gunthorpe } 1468d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_and_put); 1469d0899892SJason Gunthorpe 1470d0899892SJason Gunthorpe /** 1471d0899892SJason Gunthorpe * ib_unregister_driver - Unregister all IB devices for a driver 1472d0899892SJason Gunthorpe * @driver_id: The driver to unregister 1473d0899892SJason Gunthorpe * 1474d0899892SJason Gunthorpe * This implements a fence for device unregistration. It only returns once all 1475d0899892SJason Gunthorpe * devices associated with the driver_id have fully completed their 1476d0899892SJason Gunthorpe * unregistration and returned from ib_unregister_device*(). 1477d0899892SJason Gunthorpe * 1478d0899892SJason Gunthorpe * If device's are not yet unregistered it goes ahead and starts unregistering 1479d0899892SJason Gunthorpe * them. 1480d0899892SJason Gunthorpe * 1481d0899892SJason Gunthorpe * This does not block creation of new devices with the given driver_id, that 1482d0899892SJason Gunthorpe * is the responsibility of the caller. 
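 *
 * A short usage sketch (RDMA_DRIVER_MY_DRV is only a stand-in for the
 * driver's real enum rdma_driver_id value):
 *
 *      static void __exit my_drv_exit(void)
 *      {
 *              ib_unregister_driver(RDMA_DRIVER_MY_DRV);
 *      }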
1483d0899892SJason Gunthorpe */ 1484d0899892SJason Gunthorpe void ib_unregister_driver(enum rdma_driver_id driver_id) 1485d0899892SJason Gunthorpe { 1486d0899892SJason Gunthorpe struct ib_device *ib_dev; 1487d0899892SJason Gunthorpe unsigned long index; 1488d0899892SJason Gunthorpe 1489d0899892SJason Gunthorpe down_read(&devices_rwsem); 1490d0899892SJason Gunthorpe xa_for_each (&devices, index, ib_dev) { 1491b9560a41SJason Gunthorpe if (ib_dev->ops.driver_id != driver_id) 1492d0899892SJason Gunthorpe continue; 1493d0899892SJason Gunthorpe 1494d0899892SJason Gunthorpe get_device(&ib_dev->dev); 1495d0899892SJason Gunthorpe up_read(&devices_rwsem); 1496d0899892SJason Gunthorpe 1497d0899892SJason Gunthorpe WARN_ON(!ib_dev->ops.dealloc_driver); 1498d0899892SJason Gunthorpe __ib_unregister_device(ib_dev); 1499d0899892SJason Gunthorpe 1500d0899892SJason Gunthorpe put_device(&ib_dev->dev); 1501d0899892SJason Gunthorpe down_read(&devices_rwsem); 1502d0899892SJason Gunthorpe } 1503d0899892SJason Gunthorpe up_read(&devices_rwsem); 1504d0899892SJason Gunthorpe } 1505d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_driver); 1506d0899892SJason Gunthorpe 1507d0899892SJason Gunthorpe static void ib_unregister_work(struct work_struct *work) 1508d0899892SJason Gunthorpe { 1509d0899892SJason Gunthorpe struct ib_device *ib_dev = 1510d0899892SJason Gunthorpe container_of(work, struct ib_device, unregistration_work); 1511d0899892SJason Gunthorpe 1512d0899892SJason Gunthorpe __ib_unregister_device(ib_dev); 1513d0899892SJason Gunthorpe put_device(&ib_dev->dev); 1514d0899892SJason Gunthorpe } 1515d0899892SJason Gunthorpe 1516d0899892SJason Gunthorpe /** 1517d0899892SJason Gunthorpe * ib_unregister_device_queued - Unregister a device using a work queue 1518d0899892SJason Gunthorpe * device: The device to unregister 1519d0899892SJason Gunthorpe * 1520d0899892SJason Gunthorpe * This schedules an asynchronous unregistration using a WQ for the device. A 1521d0899892SJason Gunthorpe * driver should use this to avoid holding locks while doing unregistration, 1522d0899892SJason Gunthorpe * such as holding the RTNL lock. 1523d0899892SJason Gunthorpe * 1524d0899892SJason Gunthorpe * Drivers using this API must use ib_unregister_driver before module unload 1525d0899892SJason Gunthorpe * to ensure that all scheduled unregistrations have completed. 1526d0899892SJason Gunthorpe */ 1527d0899892SJason Gunthorpe void ib_unregister_device_queued(struct ib_device *ib_dev) 1528d0899892SJason Gunthorpe { 1529d0899892SJason Gunthorpe WARN_ON(!refcount_read(&ib_dev->refcount)); 1530d0899892SJason Gunthorpe WARN_ON(!ib_dev->ops.dealloc_driver); 1531d0899892SJason Gunthorpe get_device(&ib_dev->dev); 1532d0899892SJason Gunthorpe if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work)) 1533d0899892SJason Gunthorpe put_device(&ib_dev->dev); 1534d0899892SJason Gunthorpe } 1535d0899892SJason Gunthorpe EXPORT_SYMBOL(ib_unregister_device_queued); 1536d0899892SJason Gunthorpe 1537decbc7a6SParav Pandit /* 1538decbc7a6SParav Pandit * The caller must pass in a device that has the kref held and the refcount 1539decbc7a6SParav Pandit * released. If the device is in cur_net and still registered then it is moved 1540decbc7a6SParav Pandit * into net. 
1541decbc7a6SParav Pandit */ 1542decbc7a6SParav Pandit static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, 1543decbc7a6SParav Pandit struct net *net) 1544decbc7a6SParav Pandit { 1545decbc7a6SParav Pandit int ret2 = -EINVAL; 1546decbc7a6SParav Pandit int ret; 1547decbc7a6SParav Pandit 1548decbc7a6SParav Pandit mutex_lock(&device->unregistration_lock); 1549decbc7a6SParav Pandit 1550decbc7a6SParav Pandit /* 15512e5b8a01SParav Pandit * If a device not under ib_device_get() or if the unregistration_lock 15522e5b8a01SParav Pandit * is not held, the namespace can be changed, or it can be unregistered. 15532e5b8a01SParav Pandit * Check again under the lock. 1554decbc7a6SParav Pandit */ 1555decbc7a6SParav Pandit if (refcount_read(&device->refcount) == 0 || 1556decbc7a6SParav Pandit !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) { 1557decbc7a6SParav Pandit ret = -ENODEV; 1558decbc7a6SParav Pandit goto out; 1559decbc7a6SParav Pandit } 1560decbc7a6SParav Pandit 1561decbc7a6SParav Pandit kobject_uevent(&device->dev.kobj, KOBJ_REMOVE); 1562decbc7a6SParav Pandit disable_device(device); 1563decbc7a6SParav Pandit 1564decbc7a6SParav Pandit /* 1565decbc7a6SParav Pandit * At this point no one can be using the device, so it is safe to 1566decbc7a6SParav Pandit * change the namespace. 1567decbc7a6SParav Pandit */ 1568decbc7a6SParav Pandit write_pnet(&device->coredev.rdma_net, net); 1569decbc7a6SParav Pandit 15702e5b8a01SParav Pandit down_read(&devices_rwsem); 1571decbc7a6SParav Pandit /* 1572decbc7a6SParav Pandit * Currently rdma devices are system wide unique. So the device name 1573decbc7a6SParav Pandit * is guaranteed free in the new namespace. Publish the new namespace 1574decbc7a6SParav Pandit * at the sysfs level. 1575decbc7a6SParav Pandit */ 1576decbc7a6SParav Pandit ret = device_rename(&device->dev, dev_name(&device->dev)); 1577decbc7a6SParav Pandit up_read(&devices_rwsem); 1578decbc7a6SParav Pandit if (ret) { 1579decbc7a6SParav Pandit dev_warn(&device->dev, 1580decbc7a6SParav Pandit "%s: Couldn't rename device after namespace change\n", 1581decbc7a6SParav Pandit __func__); 1582decbc7a6SParav Pandit /* Try and put things back and re-enable the device */ 1583decbc7a6SParav Pandit write_pnet(&device->coredev.rdma_net, cur_net); 1584decbc7a6SParav Pandit } 1585decbc7a6SParav Pandit 1586decbc7a6SParav Pandit ret2 = enable_device_and_get(device); 15872e5b8a01SParav Pandit if (ret2) { 1588decbc7a6SParav Pandit /* 1589decbc7a6SParav Pandit * This shouldn't really happen, but if it does, let the user 1590decbc7a6SParav Pandit * retry at later point. So don't disable the device. 
1591decbc7a6SParav Pandit */ 1592decbc7a6SParav Pandit dev_warn(&device->dev, 1593decbc7a6SParav Pandit "%s: Couldn't re-enable device after namespace change\n", 1594decbc7a6SParav Pandit __func__); 15952e5b8a01SParav Pandit } 1596decbc7a6SParav Pandit kobject_uevent(&device->dev.kobj, KOBJ_ADD); 15972e5b8a01SParav Pandit 1598decbc7a6SParav Pandit ib_device_put(device); 1599decbc7a6SParav Pandit out: 1600decbc7a6SParav Pandit mutex_unlock(&device->unregistration_lock); 1601decbc7a6SParav Pandit if (ret) 1602decbc7a6SParav Pandit return ret; 1603decbc7a6SParav Pandit return ret2; 1604decbc7a6SParav Pandit } 1605decbc7a6SParav Pandit 16062e5b8a01SParav Pandit int ib_device_set_netns_put(struct sk_buff *skb, 16072e5b8a01SParav Pandit struct ib_device *dev, u32 ns_fd) 16082e5b8a01SParav Pandit { 16092e5b8a01SParav Pandit struct net *net; 16102e5b8a01SParav Pandit int ret; 16112e5b8a01SParav Pandit 16122e5b8a01SParav Pandit net = get_net_ns_by_fd(ns_fd); 16132e5b8a01SParav Pandit if (IS_ERR(net)) { 16142e5b8a01SParav Pandit ret = PTR_ERR(net); 16152e5b8a01SParav Pandit goto net_err; 16162e5b8a01SParav Pandit } 16172e5b8a01SParav Pandit 16182e5b8a01SParav Pandit if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 16192e5b8a01SParav Pandit ret = -EPERM; 16202e5b8a01SParav Pandit goto ns_err; 16212e5b8a01SParav Pandit } 16222e5b8a01SParav Pandit 16232e5b8a01SParav Pandit /* 16242e5b8a01SParav Pandit * Currently supported only for those providers which support 16252e5b8a01SParav Pandit * disassociation and don't do port specific sysfs init. Once a 16262e5b8a01SParav Pandit * port_cleanup infrastructure is implemented, this limitation will be 16272e5b8a01SParav Pandit * removed. 16282e5b8a01SParav Pandit */ 16292e5b8a01SParav Pandit if (!dev->ops.disassociate_ucontext || dev->ops.init_port || 16302e5b8a01SParav Pandit ib_devices_shared_netns) { 16312e5b8a01SParav Pandit ret = -EOPNOTSUPP; 16322e5b8a01SParav Pandit goto ns_err; 16332e5b8a01SParav Pandit } 16342e5b8a01SParav Pandit 16352e5b8a01SParav Pandit get_device(&dev->dev); 16362e5b8a01SParav Pandit ib_device_put(dev); 16372e5b8a01SParav Pandit ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net); 16382e5b8a01SParav Pandit put_device(&dev->dev); 16392e5b8a01SParav Pandit 16402e5b8a01SParav Pandit put_net(net); 16412e5b8a01SParav Pandit return ret; 16422e5b8a01SParav Pandit 16432e5b8a01SParav Pandit ns_err: 16442e5b8a01SParav Pandit put_net(net); 16452e5b8a01SParav Pandit net_err: 16462e5b8a01SParav Pandit ib_device_put(dev); 16472e5b8a01SParav Pandit return ret; 16482e5b8a01SParav Pandit } 16492e5b8a01SParav Pandit 16504e0f7b90SParav Pandit static struct pernet_operations rdma_dev_net_ops = { 16514e0f7b90SParav Pandit .init = rdma_dev_init_net, 16524e0f7b90SParav Pandit .exit = rdma_dev_exit_net, 16534e0f7b90SParav Pandit .id = &rdma_dev_net_id, 16544e0f7b90SParav Pandit .size = sizeof(struct rdma_dev_net), 16554e0f7b90SParav Pandit }; 16564e0f7b90SParav Pandit 1657e59178d8SJason Gunthorpe static int assign_client_id(struct ib_client *client) 1658e59178d8SJason Gunthorpe { 1659e59178d8SJason Gunthorpe int ret; 1660e59178d8SJason Gunthorpe 1661921eab11SJason Gunthorpe down_write(&clients_rwsem); 1662e59178d8SJason Gunthorpe /* 1663e59178d8SJason Gunthorpe * The add/remove callbacks must be called in FIFO/LIFO order. 
To 1664e59178d8SJason Gunthorpe * achieve this we assign client_ids so they are sorted in 1665e59178d8SJason Gunthorpe * registration order, and retain a linked list we can reverse iterate 1666e59178d8SJason Gunthorpe * to get the LIFO order. The extra linked list can go away if xarray 1667e59178d8SJason Gunthorpe * learns to reverse iterate. 1668e59178d8SJason Gunthorpe */ 1669ea295481SLinus Torvalds if (list_empty(&client_list)) { 1670e59178d8SJason Gunthorpe client->client_id = 0; 1671ea295481SLinus Torvalds } else { 1672ea295481SLinus Torvalds struct ib_client *last; 1673ea295481SLinus Torvalds 1674ea295481SLinus Torvalds last = list_last_entry(&client_list, struct ib_client, list); 1675ea295481SLinus Torvalds client->client_id = last->client_id + 1; 1676ea295481SLinus Torvalds } 1677ea295481SLinus Torvalds ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); 1678e59178d8SJason Gunthorpe if (ret) 1679e59178d8SJason Gunthorpe goto out; 1680e59178d8SJason Gunthorpe 1681921eab11SJason Gunthorpe xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); 1682921eab11SJason Gunthorpe list_add_tail(&client->list, &client_list); 1683921eab11SJason Gunthorpe 1684e59178d8SJason Gunthorpe out: 1685921eab11SJason Gunthorpe up_write(&clients_rwsem); 1686e59178d8SJason Gunthorpe return ret; 1687e59178d8SJason Gunthorpe } 1688e59178d8SJason Gunthorpe 16891da177e4SLinus Torvalds /** 16901da177e4SLinus Torvalds * ib_register_client - Register an IB client 16911da177e4SLinus Torvalds * @client:Client to register 16921da177e4SLinus Torvalds * 16931da177e4SLinus Torvalds * Upper level users of the IB drivers can use ib_register_client() to 16941da177e4SLinus Torvalds * register callbacks for IB device addition and removal. When an IB 16951da177e4SLinus Torvalds * device is added, each registered client's add method will be called 16961da177e4SLinus Torvalds * (in the order the clients were registered), and when a device is 16971da177e4SLinus Torvalds * removed, each client's remove method will be called (in the reverse 16981da177e4SLinus Torvalds * order that clients were registered). In addition, when 16991da177e4SLinus Torvalds * ib_register_client() is called, the client will receive an add 17001da177e4SLinus Torvalds * callback for all devices already registered. 
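 *
 * A minimal client sketch (the my_client names are illustrative, not
 * part of this API):
 *
 *      static void my_client_add(struct ib_device *device)
 *      {
 *              pr_info("my_client: %s added\n", dev_name(&device->dev));
 *      }
 *
 *      static void my_client_remove(struct ib_device *device, void *client_data)
 *      {
 *              pr_info("my_client: %s removed\n", dev_name(&device->dev));
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_client_add,
 *              .remove = my_client_remove,
 *      };
 *
 *      err = ib_register_client(&my_client);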
17011da177e4SLinus Torvalds */ 17021da177e4SLinus Torvalds int ib_register_client(struct ib_client *client) 17031da177e4SLinus Torvalds { 17041da177e4SLinus Torvalds struct ib_device *device; 17050df91bb6SJason Gunthorpe unsigned long index; 1706e59178d8SJason Gunthorpe int ret; 17071da177e4SLinus Torvalds 1708e59178d8SJason Gunthorpe ret = assign_client_id(client); 1709921eab11SJason Gunthorpe if (ret) 1710921eab11SJason Gunthorpe return ret; 1711921eab11SJason Gunthorpe 1712921eab11SJason Gunthorpe down_read(&devices_rwsem); 1713921eab11SJason Gunthorpe xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { 1714921eab11SJason Gunthorpe ret = add_client_context(device, client); 1715e59178d8SJason Gunthorpe if (ret) { 1716921eab11SJason Gunthorpe up_read(&devices_rwsem); 1717921eab11SJason Gunthorpe ib_unregister_client(client); 1718e59178d8SJason Gunthorpe return ret; 1719e59178d8SJason Gunthorpe } 1720921eab11SJason Gunthorpe } 1721921eab11SJason Gunthorpe up_read(&devices_rwsem); 17221da177e4SLinus Torvalds return 0; 17231da177e4SLinus Torvalds } 17241da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_client); 17251da177e4SLinus Torvalds 17261da177e4SLinus Torvalds /** 17271da177e4SLinus Torvalds * ib_unregister_client - Unregister an IB client 17281da177e4SLinus Torvalds * @client:Client to unregister 17291da177e4SLinus Torvalds * 17301da177e4SLinus Torvalds * Upper level users use ib_unregister_client() to remove their client 17311da177e4SLinus Torvalds * registration. When ib_unregister_client() is called, the client 17321da177e4SLinus Torvalds * will receive a remove callback for each IB device still registered. 1733921eab11SJason Gunthorpe * 1734921eab11SJason Gunthorpe * This is a full fence, once it returns no client callbacks will be called, 1735921eab11SJason Gunthorpe * or are running in another thread. 17361da177e4SLinus Torvalds */ 17371da177e4SLinus Torvalds void ib_unregister_client(struct ib_client *client) 17381da177e4SLinus Torvalds { 17391da177e4SLinus Torvalds struct ib_device *device; 17400df91bb6SJason Gunthorpe unsigned long index; 17411da177e4SLinus Torvalds 1742921eab11SJason Gunthorpe down_write(&clients_rwsem); 1743e59178d8SJason Gunthorpe xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); 1744921eab11SJason Gunthorpe up_write(&clients_rwsem); 1745921eab11SJason Gunthorpe /* 1746921eab11SJason Gunthorpe * Every device still known must be serialized to make sure we are 1747921eab11SJason Gunthorpe * done with the client callbacks before we return. 
1748921eab11SJason Gunthorpe */ 1749921eab11SJason Gunthorpe down_read(&devices_rwsem); 1750921eab11SJason Gunthorpe xa_for_each (&devices, index, device) 1751921eab11SJason Gunthorpe remove_client_context(device, client->client_id); 1752921eab11SJason Gunthorpe up_read(&devices_rwsem); 17535aa44bb9SHaggai Eran 1754921eab11SJason Gunthorpe down_write(&clients_rwsem); 1755e59178d8SJason Gunthorpe list_del(&client->list); 1756e59178d8SJason Gunthorpe xa_erase(&clients, client->client_id); 1757921eab11SJason Gunthorpe up_write(&clients_rwsem); 17581da177e4SLinus Torvalds } 17591da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_client); 17601da177e4SLinus Torvalds 17610e2d00ebSJason Gunthorpe static int __ib_get_global_client_nl_info(const char *client_name, 17620e2d00ebSJason Gunthorpe struct ib_client_nl_info *res) 17630e2d00ebSJason Gunthorpe { 17640e2d00ebSJason Gunthorpe struct ib_client *client; 17650e2d00ebSJason Gunthorpe unsigned long index; 17660e2d00ebSJason Gunthorpe int ret = -ENOENT; 17670e2d00ebSJason Gunthorpe 17680e2d00ebSJason Gunthorpe down_read(&clients_rwsem); 17690e2d00ebSJason Gunthorpe xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { 17700e2d00ebSJason Gunthorpe if (strcmp(client->name, client_name) != 0) 17710e2d00ebSJason Gunthorpe continue; 17720e2d00ebSJason Gunthorpe if (!client->get_global_nl_info) { 17730e2d00ebSJason Gunthorpe ret = -EOPNOTSUPP; 17740e2d00ebSJason Gunthorpe break; 17750e2d00ebSJason Gunthorpe } 17760e2d00ebSJason Gunthorpe ret = client->get_global_nl_info(res); 17770e2d00ebSJason Gunthorpe if (WARN_ON(ret == -ENOENT)) 17780e2d00ebSJason Gunthorpe ret = -EINVAL; 17790e2d00ebSJason Gunthorpe if (!ret && res->cdev) 17800e2d00ebSJason Gunthorpe get_device(res->cdev); 17810e2d00ebSJason Gunthorpe break; 17820e2d00ebSJason Gunthorpe } 17830e2d00ebSJason Gunthorpe up_read(&clients_rwsem); 17840e2d00ebSJason Gunthorpe return ret; 17850e2d00ebSJason Gunthorpe } 17860e2d00ebSJason Gunthorpe 17870e2d00ebSJason Gunthorpe static int __ib_get_client_nl_info(struct ib_device *ibdev, 17880e2d00ebSJason Gunthorpe const char *client_name, 17890e2d00ebSJason Gunthorpe struct ib_client_nl_info *res) 17900e2d00ebSJason Gunthorpe { 17910e2d00ebSJason Gunthorpe unsigned long index; 17920e2d00ebSJason Gunthorpe void *client_data; 17930e2d00ebSJason Gunthorpe int ret = -ENOENT; 17940e2d00ebSJason Gunthorpe 17950e2d00ebSJason Gunthorpe down_read(&ibdev->client_data_rwsem); 17960e2d00ebSJason Gunthorpe xan_for_each_marked (&ibdev->client_data, index, client_data, 17970e2d00ebSJason Gunthorpe CLIENT_DATA_REGISTERED) { 17980e2d00ebSJason Gunthorpe struct ib_client *client = xa_load(&clients, index); 17990e2d00ebSJason Gunthorpe 18000e2d00ebSJason Gunthorpe if (!client || strcmp(client->name, client_name) != 0) 18010e2d00ebSJason Gunthorpe continue; 18020e2d00ebSJason Gunthorpe if (!client->get_nl_info) { 18030e2d00ebSJason Gunthorpe ret = -EOPNOTSUPP; 18040e2d00ebSJason Gunthorpe break; 18050e2d00ebSJason Gunthorpe } 18060e2d00ebSJason Gunthorpe ret = client->get_nl_info(ibdev, client_data, res); 18070e2d00ebSJason Gunthorpe if (WARN_ON(ret == -ENOENT)) 18080e2d00ebSJason Gunthorpe ret = -EINVAL; 18090e2d00ebSJason Gunthorpe 18100e2d00ebSJason Gunthorpe /* 18110e2d00ebSJason Gunthorpe * The cdev is guaranteed valid as long as we are inside the 18120e2d00ebSJason Gunthorpe * client_data_rwsem as remove_one can't be called. Keep it 18130e2d00ebSJason Gunthorpe * valid for the caller. 
18140e2d00ebSJason Gunthorpe */ 18150e2d00ebSJason Gunthorpe if (!ret && res->cdev) 18160e2d00ebSJason Gunthorpe get_device(res->cdev); 18170e2d00ebSJason Gunthorpe break; 18180e2d00ebSJason Gunthorpe } 18190e2d00ebSJason Gunthorpe up_read(&ibdev->client_data_rwsem); 18200e2d00ebSJason Gunthorpe 18210e2d00ebSJason Gunthorpe return ret; 18220e2d00ebSJason Gunthorpe } 18230e2d00ebSJason Gunthorpe 18240e2d00ebSJason Gunthorpe /** 18250e2d00ebSJason Gunthorpe * ib_get_client_nl_info - Fetch the nl_info from a client 18260e2d00ebSJason Gunthorpe * @device - IB device 18270e2d00ebSJason Gunthorpe * @client_name - Name of the client 18280e2d00ebSJason Gunthorpe * @res - Result of the query 18290e2d00ebSJason Gunthorpe */ 18300e2d00ebSJason Gunthorpe int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, 18310e2d00ebSJason Gunthorpe struct ib_client_nl_info *res) 18320e2d00ebSJason Gunthorpe { 18330e2d00ebSJason Gunthorpe int ret; 18340e2d00ebSJason Gunthorpe 18350e2d00ebSJason Gunthorpe if (ibdev) 18360e2d00ebSJason Gunthorpe ret = __ib_get_client_nl_info(ibdev, client_name, res); 18370e2d00ebSJason Gunthorpe else 18380e2d00ebSJason Gunthorpe ret = __ib_get_global_client_nl_info(client_name, res); 18390e2d00ebSJason Gunthorpe #ifdef CONFIG_MODULES 18400e2d00ebSJason Gunthorpe if (ret == -ENOENT) { 18410e2d00ebSJason Gunthorpe request_module("rdma-client-%s", client_name); 18420e2d00ebSJason Gunthorpe if (ibdev) 18430e2d00ebSJason Gunthorpe ret = __ib_get_client_nl_info(ibdev, client_name, res); 18440e2d00ebSJason Gunthorpe else 18450e2d00ebSJason Gunthorpe ret = __ib_get_global_client_nl_info(client_name, res); 18460e2d00ebSJason Gunthorpe } 18470e2d00ebSJason Gunthorpe #endif 18480e2d00ebSJason Gunthorpe if (ret) { 18490e2d00ebSJason Gunthorpe if (ret == -ENOENT) 18500e2d00ebSJason Gunthorpe return -EOPNOTSUPP; 18510e2d00ebSJason Gunthorpe return ret; 18520e2d00ebSJason Gunthorpe } 18530e2d00ebSJason Gunthorpe 18540e2d00ebSJason Gunthorpe if (WARN_ON(!res->cdev)) 18550e2d00ebSJason Gunthorpe return -EINVAL; 18560e2d00ebSJason Gunthorpe return 0; 18570e2d00ebSJason Gunthorpe } 18580e2d00ebSJason Gunthorpe 18591da177e4SLinus Torvalds /** 18609cd330d3SKrishna Kumar * ib_set_client_data - Set IB client context 18611da177e4SLinus Torvalds * @device:Device to set context for 18621da177e4SLinus Torvalds * @client:Client to set context for 18631da177e4SLinus Torvalds * @data:Context to set 18641da177e4SLinus Torvalds * 18650df91bb6SJason Gunthorpe * ib_set_client_data() sets client context data that can be retrieved with 18660df91bb6SJason Gunthorpe * ib_get_client_data(). This can only be called while the client is 18670df91bb6SJason Gunthorpe * registered to the device, once the ib_client remove() callback returns this 18680df91bb6SJason Gunthorpe * cannot be called. 
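 *
 * A hedged sketch of the usual pairing with ib_get_client_data() (the
 * my_client and my_ctx names are illustrative):
 *
 *      static void my_client_add(struct ib_device *device)
 *      {
 *              struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *              if (!ctx)
 *                      return;
 *              ib_set_client_data(device, &my_client, ctx);
 *      }
 *
 *      static void my_client_remove(struct ib_device *device, void *client_data)
 *      {
 *              kfree(client_data);
 *      }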
18691da177e4SLinus Torvalds */ 18701da177e4SLinus Torvalds void ib_set_client_data(struct ib_device *device, struct ib_client *client, 18711da177e4SLinus Torvalds void *data) 18721da177e4SLinus Torvalds { 18730df91bb6SJason Gunthorpe void *rc; 18741da177e4SLinus Torvalds 18750df91bb6SJason Gunthorpe if (WARN_ON(IS_ERR(data))) 18760df91bb6SJason Gunthorpe data = NULL; 18771da177e4SLinus Torvalds 18780df91bb6SJason Gunthorpe rc = xa_store(&device->client_data, client->client_id, data, 18790df91bb6SJason Gunthorpe GFP_KERNEL); 18800df91bb6SJason Gunthorpe WARN_ON(xa_is_err(rc)); 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds EXPORT_SYMBOL(ib_set_client_data); 18831da177e4SLinus Torvalds 18841da177e4SLinus Torvalds /** 18851da177e4SLinus Torvalds * ib_register_event_handler - Register an IB event handler 18861da177e4SLinus Torvalds * @event_handler:Handler to register 18871da177e4SLinus Torvalds * 18881da177e4SLinus Torvalds * ib_register_event_handler() registers an event handler that will be 18891da177e4SLinus Torvalds * called back when asynchronous IB events occur (as defined in 18901da177e4SLinus Torvalds * chapter 11 of the InfiniBand Architecture Specification). This 18911da177e4SLinus Torvalds * callback may occur in interrupt context. 18921da177e4SLinus Torvalds */ 1893dcc9881eSLeon Romanovsky void ib_register_event_handler(struct ib_event_handler *event_handler) 18941da177e4SLinus Torvalds { 18951da177e4SLinus Torvalds unsigned long flags; 18961da177e4SLinus Torvalds 18971da177e4SLinus Torvalds spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 18981da177e4SLinus Torvalds list_add_tail(&event_handler->list, 18991da177e4SLinus Torvalds &event_handler->device->event_handler_list); 19001da177e4SLinus Torvalds spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 19011da177e4SLinus Torvalds } 19021da177e4SLinus Torvalds EXPORT_SYMBOL(ib_register_event_handler); 19031da177e4SLinus Torvalds 19041da177e4SLinus Torvalds /** 19051da177e4SLinus Torvalds * ib_unregister_event_handler - Unregister an event handler 19061da177e4SLinus Torvalds * @event_handler:Handler to unregister 19071da177e4SLinus Torvalds * 19081da177e4SLinus Torvalds * Unregister an event handler registered with 19091da177e4SLinus Torvalds * ib_register_event_handler(). 19101da177e4SLinus Torvalds */ 1911dcc9881eSLeon Romanovsky void ib_unregister_event_handler(struct ib_event_handler *event_handler) 19121da177e4SLinus Torvalds { 19131da177e4SLinus Torvalds unsigned long flags; 19141da177e4SLinus Torvalds 19151da177e4SLinus Torvalds spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 19161da177e4SLinus Torvalds list_del(&event_handler->list); 19171da177e4SLinus Torvalds spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 19181da177e4SLinus Torvalds } 19191da177e4SLinus Torvalds EXPORT_SYMBOL(ib_unregister_event_handler); 19201da177e4SLinus Torvalds 19211da177e4SLinus Torvalds /** 19221da177e4SLinus Torvalds * ib_dispatch_event - Dispatch an asynchronous event 19231da177e4SLinus Torvalds * @event:Event to dispatch 19241da177e4SLinus Torvalds * 19251da177e4SLinus Torvalds * Low-level drivers must call ib_dispatch_event() to dispatch the 19261da177e4SLinus Torvalds * event to all registered event handlers when an asynchronous event 19271da177e4SLinus Torvalds * occurs. 
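 *
 * A short driver-side sketch (drv and port are illustrative names):
 *
 *      struct ib_event event;
 *
 *      event.device = &drv->ibdev;
 *      event.element.port_num = port;
 *      event.event = IB_EVENT_PORT_ACTIVE;
 *      ib_dispatch_event(&event);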
19281da177e4SLinus Torvalds */ 19291da177e4SLinus Torvalds void ib_dispatch_event(struct ib_event *event) 19301da177e4SLinus Torvalds { 19311da177e4SLinus Torvalds unsigned long flags; 19321da177e4SLinus Torvalds struct ib_event_handler *handler; 19331da177e4SLinus Torvalds 19341da177e4SLinus Torvalds spin_lock_irqsave(&event->device->event_handler_lock, flags); 19351da177e4SLinus Torvalds 19361da177e4SLinus Torvalds list_for_each_entry(handler, &event->device->event_handler_list, list) 19371da177e4SLinus Torvalds handler->handler(handler, event); 19381da177e4SLinus Torvalds 19391da177e4SLinus Torvalds spin_unlock_irqrestore(&event->device->event_handler_lock, flags); 19401da177e4SLinus Torvalds } 19411da177e4SLinus Torvalds EXPORT_SYMBOL(ib_dispatch_event); 19421da177e4SLinus Torvalds 19431da177e4SLinus Torvalds /** 19441da177e4SLinus Torvalds * ib_query_port - Query IB port attributes 19451da177e4SLinus Torvalds * @device:Device to query 19461da177e4SLinus Torvalds * @port_num:Port number to query 19471da177e4SLinus Torvalds * @port_attr:Port attributes 19481da177e4SLinus Torvalds * 19491da177e4SLinus Torvalds * ib_query_port() returns the attributes of a port through the 19501da177e4SLinus Torvalds * @port_attr pointer. 19511da177e4SLinus Torvalds */ 19521da177e4SLinus Torvalds int ib_query_port(struct ib_device *device, 19531da177e4SLinus Torvalds u8 port_num, 19541da177e4SLinus Torvalds struct ib_port_attr *port_attr) 19551da177e4SLinus Torvalds { 1956fad61ad4SEli Cohen union ib_gid gid; 1957fad61ad4SEli Cohen int err; 1958fad61ad4SEli Cohen 195924dc831bSYuval Shaia if (!rdma_is_port_valid(device, port_num)) 1960116c0074SRoland Dreier return -EINVAL; 1961116c0074SRoland Dreier 1962fad61ad4SEli Cohen memset(port_attr, 0, sizeof(*port_attr)); 19633023a1e9SKamal Heib err = device->ops.query_port(device, port_num, port_attr); 1964fad61ad4SEli Cohen if (err || port_attr->subnet_prefix) 1965fad61ad4SEli Cohen return err; 1966fad61ad4SEli Cohen 1967d7012467SEli Cohen if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) 1968d7012467SEli Cohen return 0; 1969d7012467SEli Cohen 19703023a1e9SKamal Heib err = device->ops.query_gid(device, port_num, 0, &gid); 1971fad61ad4SEli Cohen if (err) 1972fad61ad4SEli Cohen return err; 1973fad61ad4SEli Cohen 1974fad61ad4SEli Cohen port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); 1975fad61ad4SEli Cohen return 0; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_port); 19781da177e4SLinus Torvalds 1979324e227eSJason Gunthorpe static void add_ndev_hash(struct ib_port_data *pdata) 1980324e227eSJason Gunthorpe { 1981324e227eSJason Gunthorpe unsigned long flags; 1982324e227eSJason Gunthorpe 1983324e227eSJason Gunthorpe might_sleep(); 1984324e227eSJason Gunthorpe 1985324e227eSJason Gunthorpe spin_lock_irqsave(&ndev_hash_lock, flags); 1986324e227eSJason Gunthorpe if (hash_hashed(&pdata->ndev_hash_link)) { 1987324e227eSJason Gunthorpe hash_del_rcu(&pdata->ndev_hash_link); 1988324e227eSJason Gunthorpe spin_unlock_irqrestore(&ndev_hash_lock, flags); 1989324e227eSJason Gunthorpe /* 1990324e227eSJason Gunthorpe * We cannot do hash_add_rcu after a hash_del_rcu until the 1991324e227eSJason Gunthorpe * grace period 1992324e227eSJason Gunthorpe */ 1993324e227eSJason Gunthorpe synchronize_rcu(); 1994324e227eSJason Gunthorpe spin_lock_irqsave(&ndev_hash_lock, flags); 1995324e227eSJason Gunthorpe } 1996324e227eSJason Gunthorpe if (pdata->netdev) 1997324e227eSJason Gunthorpe hash_add_rcu(ndev_hash, 
&pdata->ndev_hash_link, 1998324e227eSJason Gunthorpe (uintptr_t)pdata->netdev); 1999324e227eSJason Gunthorpe spin_unlock_irqrestore(&ndev_hash_lock, flags); 2000324e227eSJason Gunthorpe } 2001324e227eSJason Gunthorpe 20021da177e4SLinus Torvalds /** 2003c2261dd7SJason Gunthorpe * ib_device_set_netdev - Associate the ib_dev with an underlying net_device 2004c2261dd7SJason Gunthorpe * @ib_dev: Device to modify 2005c2261dd7SJason Gunthorpe * @ndev: net_device to affiliate, may be NULL 2006c2261dd7SJason Gunthorpe * @port: IB port the net_device is connected to 2007c2261dd7SJason Gunthorpe * 2008c2261dd7SJason Gunthorpe * Drivers should use this to link the ib_device to a netdev so the netdev 2009c2261dd7SJason Gunthorpe * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be 2010c2261dd7SJason Gunthorpe * affiliated with any port. 2011c2261dd7SJason Gunthorpe * 2012c2261dd7SJason Gunthorpe * The caller must ensure that the given ndev is not unregistered or 2013c2261dd7SJason Gunthorpe * unregistering, and that either the ib_device is unregistered or 2014c2261dd7SJason Gunthorpe * ib_device_set_netdev() is called with NULL when the ndev sends a 2015c2261dd7SJason Gunthorpe * NETDEV_UNREGISTER event. 2016c2261dd7SJason Gunthorpe */ 2017c2261dd7SJason Gunthorpe int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 2018c2261dd7SJason Gunthorpe unsigned int port) 2019c2261dd7SJason Gunthorpe { 2020c2261dd7SJason Gunthorpe struct net_device *old_ndev; 2021c2261dd7SJason Gunthorpe struct ib_port_data *pdata; 2022c2261dd7SJason Gunthorpe unsigned long flags; 2023c2261dd7SJason Gunthorpe int ret; 2024c2261dd7SJason Gunthorpe 2025c2261dd7SJason Gunthorpe /* 2026c2261dd7SJason Gunthorpe * Drivers wish to call this before ib_register_driver, so we have to 2027c2261dd7SJason Gunthorpe * setup the port data early. 
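 *
 * A hedged caller-side sketch (drv and its members are illustrative):
 * a single-port RoCE driver typically calls
 *
 *      ib_device_set_netdev(&drv->ibdev, drv->netdev, 1);
 *
 * after allocating the ib_device, and calls it again with a NULL ndev
 * from its NETDEV_UNREGISTER handling.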
2028c2261dd7SJason Gunthorpe */ 2029c2261dd7SJason Gunthorpe ret = alloc_port_data(ib_dev); 2030c2261dd7SJason Gunthorpe if (ret) 2031c2261dd7SJason Gunthorpe return ret; 2032c2261dd7SJason Gunthorpe 2033c2261dd7SJason Gunthorpe if (!rdma_is_port_valid(ib_dev, port)) 2034c2261dd7SJason Gunthorpe return -EINVAL; 2035c2261dd7SJason Gunthorpe 2036c2261dd7SJason Gunthorpe pdata = &ib_dev->port_data[port]; 2037c2261dd7SJason Gunthorpe spin_lock_irqsave(&pdata->netdev_lock, flags); 2038324e227eSJason Gunthorpe old_ndev = rcu_dereference_protected( 2039324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2040324e227eSJason Gunthorpe if (old_ndev == ndev) { 2041c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2042c2261dd7SJason Gunthorpe return 0; 2043c2261dd7SJason Gunthorpe } 2044c2261dd7SJason Gunthorpe 2045c2261dd7SJason Gunthorpe if (ndev) 2046c2261dd7SJason Gunthorpe dev_hold(ndev); 2047324e227eSJason Gunthorpe rcu_assign_pointer(pdata->netdev, ndev); 2048c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2049c2261dd7SJason Gunthorpe 2050324e227eSJason Gunthorpe add_ndev_hash(pdata); 2051c2261dd7SJason Gunthorpe if (old_ndev) 2052c2261dd7SJason Gunthorpe dev_put(old_ndev); 2053c2261dd7SJason Gunthorpe 2054c2261dd7SJason Gunthorpe return 0; 2055c2261dd7SJason Gunthorpe } 2056c2261dd7SJason Gunthorpe EXPORT_SYMBOL(ib_device_set_netdev); 2057c2261dd7SJason Gunthorpe 2058c2261dd7SJason Gunthorpe static void free_netdevs(struct ib_device *ib_dev) 2059c2261dd7SJason Gunthorpe { 2060c2261dd7SJason Gunthorpe unsigned long flags; 2061c2261dd7SJason Gunthorpe unsigned int port; 2062c2261dd7SJason Gunthorpe 206346bdf370SKamal Heib if (!ib_dev->port_data) 206446bdf370SKamal Heib return; 206546bdf370SKamal Heib 2066c2261dd7SJason Gunthorpe rdma_for_each_port (ib_dev, port) { 2067c2261dd7SJason Gunthorpe struct ib_port_data *pdata = &ib_dev->port_data[port]; 2068324e227eSJason Gunthorpe struct net_device *ndev; 2069c2261dd7SJason Gunthorpe 2070c2261dd7SJason Gunthorpe spin_lock_irqsave(&pdata->netdev_lock, flags); 2071324e227eSJason Gunthorpe ndev = rcu_dereference_protected( 2072324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2073324e227eSJason Gunthorpe if (ndev) { 2074324e227eSJason Gunthorpe spin_lock(&ndev_hash_lock); 2075324e227eSJason Gunthorpe hash_del_rcu(&pdata->ndev_hash_link); 2076324e227eSJason Gunthorpe spin_unlock(&ndev_hash_lock); 2077324e227eSJason Gunthorpe 2078324e227eSJason Gunthorpe /* 2079324e227eSJason Gunthorpe * If this is the last dev_put there is still a 2080324e227eSJason Gunthorpe * synchronize_rcu before the netdev is kfreed, so we 2081324e227eSJason Gunthorpe * can continue to rely on unlocked pointer 2082324e227eSJason Gunthorpe * comparisons after the put 2083324e227eSJason Gunthorpe */ 2084324e227eSJason Gunthorpe rcu_assign_pointer(pdata->netdev, NULL); 2085324e227eSJason Gunthorpe dev_put(ndev); 2086c2261dd7SJason Gunthorpe } 2087c2261dd7SJason Gunthorpe spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2088c2261dd7SJason Gunthorpe } 2089c2261dd7SJason Gunthorpe } 2090c2261dd7SJason Gunthorpe 2091c2261dd7SJason Gunthorpe struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 2092c2261dd7SJason Gunthorpe unsigned int port) 2093c2261dd7SJason Gunthorpe { 2094c2261dd7SJason Gunthorpe struct ib_port_data *pdata; 2095c2261dd7SJason Gunthorpe struct net_device *res; 2096c2261dd7SJason Gunthorpe 2097c2261dd7SJason Gunthorpe if (!rdma_is_port_valid(ib_dev, 
port)) 2098c2261dd7SJason Gunthorpe return NULL; 2099c2261dd7SJason Gunthorpe 2100c2261dd7SJason Gunthorpe pdata = &ib_dev->port_data[port]; 2101c2261dd7SJason Gunthorpe 2102c2261dd7SJason Gunthorpe /* 2103c2261dd7SJason Gunthorpe * New drivers should use ib_device_set_netdev() not the legacy 2104c2261dd7SJason Gunthorpe * get_netdev(). 2105c2261dd7SJason Gunthorpe */ 2106c2261dd7SJason Gunthorpe if (ib_dev->ops.get_netdev) 2107c2261dd7SJason Gunthorpe res = ib_dev->ops.get_netdev(ib_dev, port); 2108c2261dd7SJason Gunthorpe else { 2109c2261dd7SJason Gunthorpe spin_lock(&pdata->netdev_lock); 2110324e227eSJason Gunthorpe res = rcu_dereference_protected( 2111324e227eSJason Gunthorpe pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2112c2261dd7SJason Gunthorpe if (res) 2113c2261dd7SJason Gunthorpe dev_hold(res); 2114c2261dd7SJason Gunthorpe spin_unlock(&pdata->netdev_lock); 2115c2261dd7SJason Gunthorpe } 2116c2261dd7SJason Gunthorpe 2117c2261dd7SJason Gunthorpe /* 2118c2261dd7SJason Gunthorpe * If we are starting to unregister expedite things by preventing 2119c2261dd7SJason Gunthorpe * propagation of an unregistering netdev. 2120c2261dd7SJason Gunthorpe */ 2121c2261dd7SJason Gunthorpe if (res && res->reg_state != NETREG_REGISTERED) { 2122c2261dd7SJason Gunthorpe dev_put(res); 2123c2261dd7SJason Gunthorpe return NULL; 2124c2261dd7SJason Gunthorpe } 2125c2261dd7SJason Gunthorpe 2126c2261dd7SJason Gunthorpe return res; 2127c2261dd7SJason Gunthorpe } 2128c2261dd7SJason Gunthorpe 2129c2261dd7SJason Gunthorpe /** 2130324e227eSJason Gunthorpe * ib_device_get_by_netdev - Find an IB device associated with a netdev 2131324e227eSJason Gunthorpe * @ndev: netdev to locate 2132324e227eSJason Gunthorpe * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) 2133324e227eSJason Gunthorpe * 2134324e227eSJason Gunthorpe * Find and hold an ib_device that is associated with a netdev via 2135324e227eSJason Gunthorpe * ib_device_set_netdev(). The caller must call ib_device_put() on the 2136324e227eSJason Gunthorpe * returned pointer. 2137324e227eSJason Gunthorpe */ 2138324e227eSJason Gunthorpe struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 2139324e227eSJason Gunthorpe enum rdma_driver_id driver_id) 2140324e227eSJason Gunthorpe { 2141324e227eSJason Gunthorpe struct ib_device *res = NULL; 2142324e227eSJason Gunthorpe struct ib_port_data *cur; 2143324e227eSJason Gunthorpe 2144324e227eSJason Gunthorpe rcu_read_lock(); 2145324e227eSJason Gunthorpe hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, 2146324e227eSJason Gunthorpe (uintptr_t)ndev) { 2147324e227eSJason Gunthorpe if (rcu_access_pointer(cur->netdev) == ndev && 2148324e227eSJason Gunthorpe (driver_id == RDMA_DRIVER_UNKNOWN || 2149b9560a41SJason Gunthorpe cur->ib_dev->ops.driver_id == driver_id) && 2150324e227eSJason Gunthorpe ib_device_try_get(cur->ib_dev)) { 2151324e227eSJason Gunthorpe res = cur->ib_dev; 2152324e227eSJason Gunthorpe break; 2153324e227eSJason Gunthorpe } 2154324e227eSJason Gunthorpe } 2155324e227eSJason Gunthorpe rcu_read_unlock(); 2156324e227eSJason Gunthorpe 2157324e227eSJason Gunthorpe return res; 2158324e227eSJason Gunthorpe } 2159324e227eSJason Gunthorpe EXPORT_SYMBOL(ib_device_get_by_netdev); 2160324e227eSJason Gunthorpe 2161324e227eSJason Gunthorpe /** 216203db3a2dSMatan Barak * ib_enum_roce_netdev - enumerate all RoCE ports 216303db3a2dSMatan Barak * @ib_dev : IB device we want to query 216403db3a2dSMatan Barak * @filter: Should we call the callback? 
216503db3a2dSMatan Barak * @filter_cookie: Cookie passed to filter 216603db3a2dSMatan Barak * @cb: Callback to call for each found RoCE ports 216703db3a2dSMatan Barak * @cookie: Cookie passed back to the callback 216803db3a2dSMatan Barak * 216903db3a2dSMatan Barak * Enumerates all of the physical RoCE ports of ib_dev 217003db3a2dSMatan Barak * which are related to netdevice and calls callback() on each 217103db3a2dSMatan Barak * device for which filter() function returns non zero. 217203db3a2dSMatan Barak */ 217303db3a2dSMatan Barak void ib_enum_roce_netdev(struct ib_device *ib_dev, 217403db3a2dSMatan Barak roce_netdev_filter filter, 217503db3a2dSMatan Barak void *filter_cookie, 217603db3a2dSMatan Barak roce_netdev_callback cb, 217703db3a2dSMatan Barak void *cookie) 217803db3a2dSMatan Barak { 2179ea1075edSJason Gunthorpe unsigned int port; 218003db3a2dSMatan Barak 2181ea1075edSJason Gunthorpe rdma_for_each_port (ib_dev, port) 218203db3a2dSMatan Barak if (rdma_protocol_roce(ib_dev, port)) { 2183c2261dd7SJason Gunthorpe struct net_device *idev = 2184c2261dd7SJason Gunthorpe ib_device_get_netdev(ib_dev, port); 218503db3a2dSMatan Barak 218603db3a2dSMatan Barak if (filter(ib_dev, port, idev, filter_cookie)) 218703db3a2dSMatan Barak cb(ib_dev, port, idev, cookie); 218803db3a2dSMatan Barak 218903db3a2dSMatan Barak if (idev) 219003db3a2dSMatan Barak dev_put(idev); 219103db3a2dSMatan Barak } 219203db3a2dSMatan Barak } 219303db3a2dSMatan Barak 219403db3a2dSMatan Barak /** 219503db3a2dSMatan Barak * ib_enum_all_roce_netdevs - enumerate all RoCE devices 219603db3a2dSMatan Barak * @filter: Should we call the callback? 219703db3a2dSMatan Barak * @filter_cookie: Cookie passed to filter 219803db3a2dSMatan Barak * @cb: Callback to call for each found RoCE ports 219903db3a2dSMatan Barak * @cookie: Cookie passed back to the callback 220003db3a2dSMatan Barak * 220103db3a2dSMatan Barak * Enumerates all RoCE devices' physical ports which are related 220203db3a2dSMatan Barak * to netdevices and calls callback() on each device for which 220303db3a2dSMatan Barak * filter() function returns non zero. 220403db3a2dSMatan Barak */ 220503db3a2dSMatan Barak void ib_enum_all_roce_netdevs(roce_netdev_filter filter, 220603db3a2dSMatan Barak void *filter_cookie, 220703db3a2dSMatan Barak roce_netdev_callback cb, 220803db3a2dSMatan Barak void *cookie) 220903db3a2dSMatan Barak { 221003db3a2dSMatan Barak struct ib_device *dev; 22110df91bb6SJason Gunthorpe unsigned long index; 221203db3a2dSMatan Barak 2213921eab11SJason Gunthorpe down_read(&devices_rwsem); 22140df91bb6SJason Gunthorpe xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) 221503db3a2dSMatan Barak ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); 2216921eab11SJason Gunthorpe up_read(&devices_rwsem); 221703db3a2dSMatan Barak } 221803db3a2dSMatan Barak 221903db3a2dSMatan Barak /** 22208030c835SLeon Romanovsky * ib_enum_all_devs - enumerate all ib_devices 22218030c835SLeon Romanovsky * @cb: Callback to call for each found ib_device 22228030c835SLeon Romanovsky * 22238030c835SLeon Romanovsky * Enumerates all ib_devices and calls callback() on each device. 
22248030c835SLeon Romanovsky */ 22258030c835SLeon Romanovsky int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, 22268030c835SLeon Romanovsky struct netlink_callback *cb) 22278030c835SLeon Romanovsky { 22280df91bb6SJason Gunthorpe unsigned long index; 22298030c835SLeon Romanovsky struct ib_device *dev; 22308030c835SLeon Romanovsky unsigned int idx = 0; 22318030c835SLeon Romanovsky int ret = 0; 22328030c835SLeon Romanovsky 2233921eab11SJason Gunthorpe down_read(&devices_rwsem); 22340df91bb6SJason Gunthorpe xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 223537eeab55SParav Pandit if (!rdma_dev_access_netns(dev, sock_net(skb->sk))) 223637eeab55SParav Pandit continue; 223737eeab55SParav Pandit 22388030c835SLeon Romanovsky ret = nldev_cb(dev, skb, cb, idx); 22398030c835SLeon Romanovsky if (ret) 22408030c835SLeon Romanovsky break; 22418030c835SLeon Romanovsky idx++; 22428030c835SLeon Romanovsky } 2243921eab11SJason Gunthorpe up_read(&devices_rwsem); 22448030c835SLeon Romanovsky return ret; 22458030c835SLeon Romanovsky } 22468030c835SLeon Romanovsky 22478030c835SLeon Romanovsky /** 22481da177e4SLinus Torvalds * ib_query_pkey - Get P_Key table entry 22491da177e4SLinus Torvalds * @device:Device to query 22501da177e4SLinus Torvalds * @port_num:Port number to query 22511da177e4SLinus Torvalds * @index:P_Key table index to query 22521da177e4SLinus Torvalds * @pkey:Returned P_Key 22531da177e4SLinus Torvalds * 22541da177e4SLinus Torvalds * ib_query_pkey() fetches the specified P_Key table entry. 22551da177e4SLinus Torvalds */ 22561da177e4SLinus Torvalds int ib_query_pkey(struct ib_device *device, 22571da177e4SLinus Torvalds u8 port_num, u16 index, u16 *pkey) 22581da177e4SLinus Torvalds { 22599af3f5cfSYuval Shaia if (!rdma_is_port_valid(device, port_num)) 22609af3f5cfSYuval Shaia return -EINVAL; 22619af3f5cfSYuval Shaia 22623023a1e9SKamal Heib return device->ops.query_pkey(device, port_num, index, pkey); 22631da177e4SLinus Torvalds } 22641da177e4SLinus Torvalds EXPORT_SYMBOL(ib_query_pkey); 22651da177e4SLinus Torvalds 22661da177e4SLinus Torvalds /** 22671da177e4SLinus Torvalds * ib_modify_device - Change IB device attributes 22681da177e4SLinus Torvalds * @device:Device to modify 22691da177e4SLinus Torvalds * @device_modify_mask:Mask of attributes to change 22701da177e4SLinus Torvalds * @device_modify:New attribute values 22711da177e4SLinus Torvalds * 22721da177e4SLinus Torvalds * ib_modify_device() changes a device's attributes as specified by 22731da177e4SLinus Torvalds * the @device_modify_mask and @device_modify structure. 22741da177e4SLinus Torvalds */ 22751da177e4SLinus Torvalds int ib_modify_device(struct ib_device *device, 22761da177e4SLinus Torvalds int device_modify_mask, 22771da177e4SLinus Torvalds struct ib_device_modify *device_modify) 22781da177e4SLinus Torvalds { 22793023a1e9SKamal Heib if (!device->ops.modify_device) 228010e1b54bSBart Van Assche return -ENOSYS; 228110e1b54bSBart Van Assche 22823023a1e9SKamal Heib return device->ops.modify_device(device, device_modify_mask, 22831da177e4SLinus Torvalds device_modify); 22841da177e4SLinus Torvalds } 22851da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_device); 22861da177e4SLinus Torvalds 22871da177e4SLinus Torvalds /** 22881da177e4SLinus Torvalds * ib_modify_port - Modifies the attributes for the specified port. 22891da177e4SLinus Torvalds * @device: The device to modify. 22901da177e4SLinus Torvalds * @port_num: The number of the port to modify. 
22911da177e4SLinus Torvalds * @port_modify_mask: Mask used to specify which attributes of the port 22921da177e4SLinus Torvalds * to change. 22931da177e4SLinus Torvalds * @port_modify: New attribute values for the port. 22941da177e4SLinus Torvalds * 22951da177e4SLinus Torvalds * ib_modify_port() changes a port's attributes as specified by the 22961da177e4SLinus Torvalds * @port_modify_mask and @port_modify structure. 22971da177e4SLinus Torvalds */ 22981da177e4SLinus Torvalds int ib_modify_port(struct ib_device *device, 22991da177e4SLinus Torvalds u8 port_num, int port_modify_mask, 23001da177e4SLinus Torvalds struct ib_port_modify *port_modify) 23011da177e4SLinus Torvalds { 230261e0962dSSelvin Xavier int rc; 230310e1b54bSBart Van Assche 230424dc831bSYuval Shaia if (!rdma_is_port_valid(device, port_num)) 2305116c0074SRoland Dreier return -EINVAL; 2306116c0074SRoland Dreier 23073023a1e9SKamal Heib if (device->ops.modify_port) 23083023a1e9SKamal Heib rc = device->ops.modify_port(device, port_num, 23093023a1e9SKamal Heib port_modify_mask, 23101da177e4SLinus Torvalds port_modify); 231161e0962dSSelvin Xavier else 231261e0962dSSelvin Xavier rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS; 231361e0962dSSelvin Xavier return rc; 23141da177e4SLinus Torvalds } 23151da177e4SLinus Torvalds EXPORT_SYMBOL(ib_modify_port); 23161da177e4SLinus Torvalds 23175eb620c8SYosef Etigin /** 23185eb620c8SYosef Etigin * ib_find_gid - Returns the port number and GID table index where 2319dbb12562SParav Pandit * a specified GID value occurs. It searches only ports using the IB link layer. 23205eb620c8SYosef Etigin * @device: The device to query. 23215eb620c8SYosef Etigin * @gid: The GID value to search for. 23225eb620c8SYosef Etigin * @port_num: The port number of the device where the GID value was found. 23235eb620c8SYosef Etigin * @index: The index into the GID table where the GID was found. This 23245eb620c8SYosef Etigin * parameter may be NULL. 23255eb620c8SYosef Etigin */ 23265eb620c8SYosef Etigin int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2327b26c4a11SParav Pandit u8 *port_num, u16 *index) 23285eb620c8SYosef Etigin { 23295eb620c8SYosef Etigin union ib_gid tmp_gid; 2330ea1075edSJason Gunthorpe unsigned int port; 2331ea1075edSJason Gunthorpe int ret, i; 23325eb620c8SYosef Etigin 2333ea1075edSJason Gunthorpe rdma_for_each_port (device, port) { 233422d24f75SParav Pandit if (!rdma_protocol_ib(device, port)) 2335b39ffa1dSMatan Barak continue; 2336b39ffa1dSMatan Barak 23378ceb1357SJason Gunthorpe for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; 23388ceb1357SJason Gunthorpe ++i) { 23391dfce294SParav Pandit ret = rdma_query_gid(device, port, i, &tmp_gid); 23405eb620c8SYosef Etigin if (ret) 23415eb620c8SYosef Etigin return ret; 23425eb620c8SYosef Etigin if (!memcmp(&tmp_gid, gid, sizeof *gid)) { 23435eb620c8SYosef Etigin *port_num = port; 23445eb620c8SYosef Etigin if (index) 23455eb620c8SYosef Etigin *index = i; 23465eb620c8SYosef Etigin return 0; 23475eb620c8SYosef Etigin } 23485eb620c8SYosef Etigin } 23495eb620c8SYosef Etigin } 23505eb620c8SYosef Etigin 23515eb620c8SYosef Etigin return -ENOENT; 23525eb620c8SYosef Etigin } 23535eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_gid); 23545eb620c8SYosef Etigin 23555eb620c8SYosef Etigin /** 23565eb620c8SYosef Etigin * ib_find_pkey - Returns the PKey table index where a specified 23575eb620c8SYosef Etigin * PKey value occurs. 23585eb620c8SYosef Etigin * @device: The device to query.
23595eb620c8SYosef Etigin * @port_num: The port number of the device to search for the PKey. 23605eb620c8SYosef Etigin * @pkey: The PKey value to search for. 23615eb620c8SYosef Etigin * @index: The index into the PKey table where the PKey was found. 23625eb620c8SYosef Etigin */ 23635eb620c8SYosef Etigin int ib_find_pkey(struct ib_device *device, 23645eb620c8SYosef Etigin u8 port_num, u16 pkey, u16 *index) 23655eb620c8SYosef Etigin { 23665eb620c8SYosef Etigin int ret, i; 23675eb620c8SYosef Etigin u16 tmp_pkey; 2368ff7166c4SJack Morgenstein int partial_ix = -1; 23695eb620c8SYosef Etigin 23708ceb1357SJason Gunthorpe for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; 23718ceb1357SJason Gunthorpe ++i) { 23725eb620c8SYosef Etigin ret = ib_query_pkey(device, port_num, i, &tmp_pkey); 23735eb620c8SYosef Etigin if (ret) 23745eb620c8SYosef Etigin return ret; 237536026eccSMoni Shoua if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { 2376ff7166c4SJack Morgenstein /* If there is a full-member pkey, take it. */ 2377ff7166c4SJack Morgenstein if (tmp_pkey & 0x8000) { 23785eb620c8SYosef Etigin *index = i; 23795eb620c8SYosef Etigin return 0; 23805eb620c8SYosef Etigin } 2381ff7166c4SJack Morgenstein if (partial_ix < 0) 2382ff7166c4SJack Morgenstein partial_ix = i; 2383ff7166c4SJack Morgenstein } 23845eb620c8SYosef Etigin } 23855eb620c8SYosef Etigin 2386ff7166c4SJack Morgenstein /* No full-member pkey found; if a limited-member one exists, take it. */ 2387ff7166c4SJack Morgenstein if (partial_ix >= 0) { 2388ff7166c4SJack Morgenstein *index = partial_ix; 2389ff7166c4SJack Morgenstein return 0; 2390ff7166c4SJack Morgenstein } 23915eb620c8SYosef Etigin return -ENOENT; 23925eb620c8SYosef Etigin } 23935eb620c8SYosef Etigin EXPORT_SYMBOL(ib_find_pkey); 23945eb620c8SYosef Etigin 23959268f72dSYotam Kenneth /** 23969268f72dSYotam Kenneth * ib_get_net_dev_by_params() - Return the appropriate net_dev 23979268f72dSYotam Kenneth * for a received CM request 23989268f72dSYotam Kenneth * @dev: An RDMA device on which the request has been received. 23999268f72dSYotam Kenneth * @port: Port number on the RDMA device. 24009268f72dSYotam Kenneth * @pkey: The PKey the request came on. 24019268f72dSYotam Kenneth * @gid: A GID that the net_dev uses to communicate. 24029268f72dSYotam Kenneth * @addr: Contains the IP address that the request specified as its 24039268f72dSYotam Kenneth * destination.
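 * Registered clients are consulted under dev->client_data_rwsem in client
 * index order; the net_dev returned by the first client whose
 * get_net_dev_by_params() finds a match is the one returned to the caller.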
2404921eab11SJason Gunthorpe * 24059268f72dSYotam Kenneth */ 24069268f72dSYotam Kenneth struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, 24079268f72dSYotam Kenneth u8 port, 24089268f72dSYotam Kenneth u16 pkey, 24099268f72dSYotam Kenneth const union ib_gid *gid, 24109268f72dSYotam Kenneth const struct sockaddr *addr) 24119268f72dSYotam Kenneth { 24129268f72dSYotam Kenneth struct net_device *net_dev = NULL; 24130df91bb6SJason Gunthorpe unsigned long index; 24140df91bb6SJason Gunthorpe void *client_data; 24159268f72dSYotam Kenneth 24169268f72dSYotam Kenneth if (!rdma_protocol_ib(dev, port)) 24179268f72dSYotam Kenneth return NULL; 24189268f72dSYotam Kenneth 2419921eab11SJason Gunthorpe /* 2420921eab11SJason Gunthorpe * Holding the read side guarantees that the client will not become 2421921eab11SJason Gunthorpe * unregistered while we are calling get_net_dev_by_params() 2422921eab11SJason Gunthorpe */ 2423921eab11SJason Gunthorpe down_read(&dev->client_data_rwsem); 24240df91bb6SJason Gunthorpe xan_for_each_marked (&dev->client_data, index, client_data, 24250df91bb6SJason Gunthorpe CLIENT_DATA_REGISTERED) { 24260df91bb6SJason Gunthorpe struct ib_client *client = xa_load(&clients, index); 24279268f72dSYotam Kenneth 24280df91bb6SJason Gunthorpe if (!client || !client->get_net_dev_by_params) 24299268f72dSYotam Kenneth continue; 24309268f72dSYotam Kenneth 24310df91bb6SJason Gunthorpe net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, 24320df91bb6SJason Gunthorpe addr, client_data); 24339268f72dSYotam Kenneth if (net_dev) 24349268f72dSYotam Kenneth break; 24359268f72dSYotam Kenneth } 2436921eab11SJason Gunthorpe up_read(&dev->client_data_rwsem); 24379268f72dSYotam Kenneth 24389268f72dSYotam Kenneth return net_dev; 24399268f72dSYotam Kenneth } 24409268f72dSYotam Kenneth EXPORT_SYMBOL(ib_get_net_dev_by_params); 24419268f72dSYotam Kenneth 2442521ed0d9SKamal Heib void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) 2443521ed0d9SKamal Heib { 24443023a1e9SKamal Heib struct ib_device_ops *dev_ops = &dev->ops; 2445521ed0d9SKamal Heib #define SET_DEVICE_OP(ptr, name) \ 2446521ed0d9SKamal Heib do { \ 2447521ed0d9SKamal Heib if (ops->name) \ 2448521ed0d9SKamal Heib if (!((ptr)->name)) \ 2449521ed0d9SKamal Heib (ptr)->name = ops->name; \ 2450521ed0d9SKamal Heib } while (0) 2451521ed0d9SKamal Heib 245230471d4bSLeon Romanovsky #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) 245330471d4bSLeon Romanovsky 2454b9560a41SJason Gunthorpe if (ops->driver_id != RDMA_DRIVER_UNKNOWN) { 2455b9560a41SJason Gunthorpe WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN && 2456b9560a41SJason Gunthorpe dev_ops->driver_id != ops->driver_id); 2457b9560a41SJason Gunthorpe dev_ops->driver_id = ops->driver_id; 2458b9560a41SJason Gunthorpe } 24597a154142SJason Gunthorpe if (ops->owner) { 24607a154142SJason Gunthorpe WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner); 24617a154142SJason Gunthorpe dev_ops->owner = ops->owner; 24627a154142SJason Gunthorpe } 246372c6ec18SJason Gunthorpe if (ops->uverbs_abi_ver) 246472c6ec18SJason Gunthorpe dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver; 2465b9560a41SJason Gunthorpe 24668f71bb00SJason Gunthorpe dev_ops->uverbs_no_driver_id_binding |= 24678f71bb00SJason Gunthorpe ops->uverbs_no_driver_id_binding; 24688f71bb00SJason Gunthorpe 24693023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, add_gid); 24702f1927b0SMoni Shoua SET_DEVICE_OP(dev_ops, advise_mr); 24713023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_dm); 24723023a1e9SKamal Heib 
SET_DEVICE_OP(dev_ops, alloc_fmr); 24733023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_hw_stats); 24743023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_mr); 247526bc7eaeSIsrael Rukshin SET_DEVICE_OP(dev_ops, alloc_mr_integrity); 24763023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_mw); 24773023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_pd); 24783023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); 24793023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_ucontext); 24803023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, alloc_xrcd); 24813023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, attach_mcast); 24823023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, check_mr_status); 2483c4ffee7cSMark Zhang SET_DEVICE_OP(dev_ops, counter_alloc_stats); 248499fa331dSMark Zhang SET_DEVICE_OP(dev_ops, counter_bind_qp); 248599fa331dSMark Zhang SET_DEVICE_OP(dev_ops, counter_dealloc); 248699fa331dSMark Zhang SET_DEVICE_OP(dev_ops, counter_unbind_qp); 2487c4ffee7cSMark Zhang SET_DEVICE_OP(dev_ops, counter_update_stats); 24883023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_ah); 24893023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_counters); 24903023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_cq); 24913023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_flow); 24923023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_flow_action_esp); 24933023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_qp); 24943023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_rwq_ind_table); 24953023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_srq); 24963023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, create_wq); 24973023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_dm); 2498d0899892SJason Gunthorpe SET_DEVICE_OP(dev_ops, dealloc_driver); 24993023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_fmr); 25003023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_mw); 25013023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_pd); 25023023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_ucontext); 25033023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dealloc_xrcd); 25043023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, del_gid); 25053023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, dereg_mr); 25063023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_ah); 25073023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_counters); 25083023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_cq); 25093023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_flow); 25103023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_flow_action); 25113023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_qp); 25123023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); 25133023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_srq); 25143023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, destroy_wq); 25153023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, detach_mcast); 25163023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, disassociate_ucontext); 25173023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, drain_rq); 25183023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, drain_sq); 2519ca22354bSJason Gunthorpe SET_DEVICE_OP(dev_ops, enable_driver); 252002da3750SLeon Romanovsky SET_DEVICE_OP(dev_ops, fill_res_entry); 25213023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_dev_fw_str); 25223023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_dma_mr); 25233023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_hw_stats); 25243023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_link_layer); 25253023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_netdev); 25263023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_port_immutable); 25273023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_vector_affinity); 25283023a1e9SKamal Heib 
SET_DEVICE_OP(dev_ops, get_vf_config); 25293023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, get_vf_stats); 2530ea4baf7fSParav Pandit SET_DEVICE_OP(dev_ops, init_port); 2531dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_accept); 2532dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_add_ref); 2533dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_connect); 2534dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_create_listen); 2535dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_destroy_listen); 2536dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_get_qp); 2537dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_reject); 2538dd05cb82SKamal Heib SET_DEVICE_OP(dev_ops, iw_rem_ref); 25393023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, map_mr_sg); 25402cdfcdd8SMax Gurtovoy SET_DEVICE_OP(dev_ops, map_mr_sg_pi); 25413023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, map_phys_fmr); 25423023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, mmap); 25433023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_ah); 25443023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_cq); 25453023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_device); 25463023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_flow_action_esp); 25473023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_port); 25483023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_qp); 25493023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_srq); 25503023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, modify_wq); 25513023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, peek_cq); 25523023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, poll_cq); 25533023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_recv); 25543023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_send); 25553023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, post_srq_recv); 25563023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, process_mad); 25573023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_ah); 25583023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_device); 25593023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_gid); 25603023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_pkey); 25613023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_port); 25623023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_qp); 25633023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, query_srq); 25643023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); 25653023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, read_counters); 25663023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, reg_dm_mr); 25673023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, reg_user_mr); 25683023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, req_ncomp_notif); 25693023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, req_notify_cq); 25703023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, rereg_user_mr); 25713023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, resize_cq); 25723023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, set_vf_guid); 25733023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, set_vf_link_state); 25743023a1e9SKamal Heib SET_DEVICE_OP(dev_ops, unmap_fmr); 257521a428a0SLeon Romanovsky 2576d3456914SLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_ah); 2577e39afe3dSLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_cq); 257821a428a0SLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_pd); 257968e326deSLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_srq); 2580a2a074efSLeon Romanovsky SET_OBJ_SIZE(dev_ops, ib_ucontext); 2581521ed0d9SKamal Heib } 2582521ed0d9SKamal Heib EXPORT_SYMBOL(ib_set_device_ops); 2583521ed0d9SKamal Heib 2584d0e312feSLeon Romanovsky static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { 2585735c631aSMark Bloch [RDMA_NL_LS_OP_RESOLVE] = { 2586647c75acSLeon Romanovsky .doit = ib_nl_handle_resolve_resp, 2587e3a2b93dSLeon Romanovsky .flags = 
RDMA_NL_ADMIN_PERM, 2588e3a2b93dSLeon Romanovsky }, 2589735c631aSMark Bloch [RDMA_NL_LS_OP_SET_TIMEOUT] = { 2590647c75acSLeon Romanovsky .doit = ib_nl_handle_set_timeout, 2591e3a2b93dSLeon Romanovsky .flags = RDMA_NL_ADMIN_PERM, 2592e3a2b93dSLeon Romanovsky }, 2593ae43f828SMark Bloch [RDMA_NL_LS_OP_IP_RESOLVE] = { 2594647c75acSLeon Romanovsky .doit = ib_nl_handle_ip_res_resp, 2595e3a2b93dSLeon Romanovsky .flags = RDMA_NL_ADMIN_PERM, 2596e3a2b93dSLeon Romanovsky }, 2597735c631aSMark Bloch }; 2598735c631aSMark Bloch 25991da177e4SLinus Torvalds static int __init ib_core_init(void) 26001da177e4SLinus Torvalds { 26011da177e4SLinus Torvalds int ret; 26021da177e4SLinus Torvalds 2603f0626710STejun Heo ib_wq = alloc_workqueue("infiniband", 0, 0); 2604f0626710STejun Heo if (!ib_wq) 2605f0626710STejun Heo return -ENOMEM; 2606f0626710STejun Heo 260714d3a3b2SChristoph Hellwig ib_comp_wq = alloc_workqueue("ib-comp-wq", 2608b7363e67SSagi Grimberg WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 260914d3a3b2SChristoph Hellwig if (!ib_comp_wq) { 261014d3a3b2SChristoph Hellwig ret = -ENOMEM; 261114d3a3b2SChristoph Hellwig goto err; 261214d3a3b2SChristoph Hellwig } 261314d3a3b2SChristoph Hellwig 2614f794809aSJack Morgenstein ib_comp_unbound_wq = 2615f794809aSJack Morgenstein alloc_workqueue("ib-comp-unb-wq", 2616f794809aSJack Morgenstein WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | 2617f794809aSJack Morgenstein WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); 2618f794809aSJack Morgenstein if (!ib_comp_unbound_wq) { 2619f794809aSJack Morgenstein ret = -ENOMEM; 2620f794809aSJack Morgenstein goto err_comp; 2621f794809aSJack Morgenstein } 2622f794809aSJack Morgenstein 262355aeed06SJason Gunthorpe ret = class_register(&ib_class); 2624fd75c789SNir Muchtar if (ret) { 2625aba25a3eSParav Pandit pr_warn("Couldn't create InfiniBand device class\n"); 2626f794809aSJack Morgenstein goto err_comp_unbound; 2627fd75c789SNir Muchtar } 26281da177e4SLinus Torvalds 2629c9901724SLeon Romanovsky ret = rdma_nl_init(); 26301da177e4SLinus Torvalds if (ret) { 2631c9901724SLeon Romanovsky pr_warn("Couldn't init IB netlink interface: err %d\n", ret); 2632fd75c789SNir Muchtar goto err_sysfs; 26331da177e4SLinus Torvalds } 26341da177e4SLinus Torvalds 2635e3f20f02SLeon Romanovsky ret = addr_init(); 2636e3f20f02SLeon Romanovsky if (ret) { 2637e3f20f02SLeon Romanovsky pr_warn("Couldn't init IB address resolution\n"); 2638e3f20f02SLeon Romanovsky goto err_ibnl; 2639e3f20f02SLeon Romanovsky } 2640e3f20f02SLeon Romanovsky 26414c2cb422SMark Bloch ret = ib_mad_init(); 26424c2cb422SMark Bloch if (ret) { 26434c2cb422SMark Bloch pr_warn("Couldn't init IB MAD\n"); 26444c2cb422SMark Bloch goto err_addr; 26454c2cb422SMark Bloch } 26464c2cb422SMark Bloch 2647c2e49c92SMark Bloch ret = ib_sa_init(); 2648c2e49c92SMark Bloch if (ret) { 2649c2e49c92SMark Bloch pr_warn("Couldn't init SA\n"); 2650c2e49c92SMark Bloch goto err_mad; 2651c2e49c92SMark Bloch } 2652c2e49c92SMark Bloch 26538f408ab6SDaniel Jurgens ret = register_lsm_notifier(&ibdev_lsm_nb); 26548f408ab6SDaniel Jurgens if (ret) { 26558f408ab6SDaniel Jurgens pr_warn("Couldn't register LSM notifier. ret %d\n", ret); 2656c9901724SLeon Romanovsky goto err_sa; 26578f408ab6SDaniel Jurgens } 26588f408ab6SDaniel Jurgens 26594e0f7b90SParav Pandit ret = register_pernet_device(&rdma_dev_net_ops); 26604e0f7b90SParav Pandit if (ret) { 26614e0f7b90SParav Pandit pr_warn("Couldn't init compat dev.
ret %d\n", ret); 26624e0f7b90SParav Pandit goto err_compat; 26634e0f7b90SParav Pandit } 26644e0f7b90SParav Pandit 26656c80b41aSLeon Romanovsky nldev_init(); 2666c9901724SLeon Romanovsky rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); 26675ef8c0c1SJason Gunthorpe roce_gid_mgmt_init(); 2668b2cbae2cSRoland Dreier 2669fd75c789SNir Muchtar return 0; 2670fd75c789SNir Muchtar 26714e0f7b90SParav Pandit err_compat: 26724e0f7b90SParav Pandit unregister_lsm_notifier(&ibdev_lsm_nb); 2673735c631aSMark Bloch err_sa: 2674735c631aSMark Bloch ib_sa_cleanup(); 2675c2e49c92SMark Bloch err_mad: 2676c2e49c92SMark Bloch ib_mad_cleanup(); 26774c2cb422SMark Bloch err_addr: 26784c2cb422SMark Bloch addr_cleanup(); 2679e3f20f02SLeon Romanovsky err_ibnl: 2680c9901724SLeon Romanovsky rdma_nl_exit(); 2681fd75c789SNir Muchtar err_sysfs: 268255aeed06SJason Gunthorpe class_unregister(&ib_class); 2683f794809aSJack Morgenstein err_comp_unbound: 2684f794809aSJack Morgenstein destroy_workqueue(ib_comp_unbound_wq); 268514d3a3b2SChristoph Hellwig err_comp: 268614d3a3b2SChristoph Hellwig destroy_workqueue(ib_comp_wq); 2687fd75c789SNir Muchtar err: 2688fd75c789SNir Muchtar destroy_workqueue(ib_wq); 26891da177e4SLinus Torvalds return ret; 26901da177e4SLinus Torvalds } 26911da177e4SLinus Torvalds 26921da177e4SLinus Torvalds static void __exit ib_core_cleanup(void) 26931da177e4SLinus Torvalds { 26945ef8c0c1SJason Gunthorpe roce_gid_mgmt_cleanup(); 26956c80b41aSLeon Romanovsky nldev_exit(); 2696c9901724SLeon Romanovsky rdma_nl_unregister(RDMA_NL_LS); 26974e0f7b90SParav Pandit unregister_pernet_device(&rdma_dev_net_ops); 2698c9901724SLeon Romanovsky unregister_lsm_notifier(&ibdev_lsm_nb); 2699c2e49c92SMark Bloch ib_sa_cleanup(); 27004c2cb422SMark Bloch ib_mad_cleanup(); 2701e3f20f02SLeon Romanovsky addr_cleanup(); 2702c9901724SLeon Romanovsky rdma_nl_exit(); 270355aeed06SJason Gunthorpe class_unregister(&ib_class); 2704f794809aSJack Morgenstein destroy_workqueue(ib_comp_unbound_wq); 270514d3a3b2SChristoph Hellwig destroy_workqueue(ib_comp_wq); 2706f7c6a7b5SRoland Dreier /* Make sure that any pending umem accounting work is done. */ 2707f0626710STejun Heo destroy_workqueue(ib_wq); 2708d0899892SJason Gunthorpe flush_workqueue(system_unbound_wq); 2709e59178d8SJason Gunthorpe WARN_ON(!xa_empty(&clients)); 27100df91bb6SJason Gunthorpe WARN_ON(!xa_empty(&devices)); 27111da177e4SLinus Torvalds } 27121da177e4SLinus Torvalds 2713e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); 2714e3bf14bdSJason Gunthorpe 271562dfa795SParav Pandit /* ib core relies on netdev stack to first register net_ns_type_operations 271662dfa795SParav Pandit * ns kobject type before ib_core initialization. 271762dfa795SParav Pandit */ 271862dfa795SParav Pandit fs_initcall(ib_core_init); 27191da177e4SLinus Torvalds module_exit(ib_core_cleanup); 2720
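/*
 * A minimal usage sketch for ib_set_device_ops(), assuming a hypothetical
 * driver "foo"; the foo_* names below are illustrative only and do not exist
 * in the tree. A driver typically fills one static const ops table and
 * applies it to the ib_device it allocated before registering the device:
 *
 *	static const struct ib_device_ops foo_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.uverbs_abi_ver = 1,
 *		.query_device = foo_query_device,
 *		.query_port = foo_query_port,
 *		.query_pkey = foo_query_pkey,
 *	};
 *
 *	ib_set_device_ops(ibdev, &foo_dev_ops);
 *
 * Because SET_DEVICE_OP() only fills callbacks that are still unset,
 * ib_set_device_ops() may be called with several partial tables; the first
 * table to supply a given callback wins.
 */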