16c80b41aSLeon Romanovsky /* 26c80b41aSLeon Romanovsky * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 36c80b41aSLeon Romanovsky * 46c80b41aSLeon Romanovsky * Redistribution and use in source and binary forms, with or without 56c80b41aSLeon Romanovsky * modification, are permitted provided that the following conditions are met: 66c80b41aSLeon Romanovsky * 76c80b41aSLeon Romanovsky * 1. Redistributions of source code must retain the above copyright 86c80b41aSLeon Romanovsky * notice, this list of conditions and the following disclaimer. 96c80b41aSLeon Romanovsky * 2. Redistributions in binary form must reproduce the above copyright 106c80b41aSLeon Romanovsky * notice, this list of conditions and the following disclaimer in the 116c80b41aSLeon Romanovsky * documentation and/or other materials provided with the distribution. 126c80b41aSLeon Romanovsky * 3. Neither the names of the copyright holders nor the names of its 136c80b41aSLeon Romanovsky * contributors may be used to endorse or promote products derived from 146c80b41aSLeon Romanovsky * this software without specific prior written permission. 156c80b41aSLeon Romanovsky * 166c80b41aSLeon Romanovsky * Alternatively, this software may be distributed under the terms of the 176c80b41aSLeon Romanovsky * GNU General Public License ("GPL") version 2 as published by the Free 186c80b41aSLeon Romanovsky * Software Foundation. 196c80b41aSLeon Romanovsky * 206c80b41aSLeon Romanovsky * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 216c80b41aSLeon Romanovsky * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 226c80b41aSLeon Romanovsky * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 236c80b41aSLeon Romanovsky * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 246c80b41aSLeon Romanovsky * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 256c80b41aSLeon Romanovsky * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 266c80b41aSLeon Romanovsky * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 276c80b41aSLeon Romanovsky * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 286c80b41aSLeon Romanovsky * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 296c80b41aSLeon Romanovsky * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 306c80b41aSLeon Romanovsky * POSSIBILITY OF SUCH DAMAGE. 316c80b41aSLeon Romanovsky */ 326c80b41aSLeon Romanovsky 33e3bf14bdSJason Gunthorpe #include <linux/module.h> 34bf3c5a93SLeon Romanovsky #include <linux/pid.h> 35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h> 363856ec4bSSteve Wise #include <linux/mutex.h> 37b4c598a6SLeon Romanovsky #include <net/netlink.h> 3800313983SSteve Wise #include <rdma/rdma_cm.h> 396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h> 406c80b41aSLeon Romanovsky 416c80b41aSLeon Romanovsky #include "core_priv.h" 4200313983SSteve Wise #include "cma_priv.h" 4341eda65cSLeon Romanovsky #include "restrack.h" 446c80b41aSLeon Romanovsky 45b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { 46b4c598a6SLeon Romanovsky [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, 47b4c598a6SLeon Romanovsky [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, 48b4c598a6SLeon Romanovsky .len = IB_DEVICE_NAME_MAX - 1}, 49b4c598a6SLeon Romanovsky [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, 508621a7e3SLeon Romanovsky [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, 518621a7e3SLeon Romanovsky .len = IB_FW_VERSION_NAME_MAX - 1}, 521aaff896SLeon Romanovsky [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, 531aaff896SLeon Romanovsky 
[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, 5412026fbbSLeon Romanovsky [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, 5580a06dd3SLeon Romanovsky [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, 5680a06dd3SLeon Romanovsky [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, 5734840feaSLeon Romanovsky [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, 585654e49dSLeon Romanovsky [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, 595654e49dSLeon Romanovsky [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, 601bb77b8cSLeon Romanovsky [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, 61bf3c5a93SLeon Romanovsky [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, 62bf3c5a93SLeon Romanovsky [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, 63bf3c5a93SLeon Romanovsky [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING, 64bf3c5a93SLeon Romanovsky .len = 16 }, 65bf3c5a93SLeon Romanovsky [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 }, 66b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, 67b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, 68b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, 69b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, 70b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, 71b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, 72b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, 73b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, 74b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, 75b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, 76b5fa635aSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, 77b5fa635aSLeon Romanovsky .len = TASK_COMM_LEN }, 7800313983SSteve Wise [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, 
7900313983SSteve Wise [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, 8000313983SSteve Wise [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, 8100313983SSteve Wise [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { 8200313983SSteve Wise .len = sizeof(struct __kernel_sockaddr_storage) }, 8300313983SSteve Wise [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { 8400313983SSteve Wise .len = sizeof(struct __kernel_sockaddr_storage) }, 85a34fc089SSteve Wise [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, 86a34fc089SSteve Wise [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, 87a34fc089SSteve Wise [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, 88a34fc089SSteve Wise [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, 89a34fc089SSteve Wise [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, 90fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, 91fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, 92fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, 93fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, 94fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, 95fccec5b8SSteve Wise [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, 9629cf1351SSteve Wise [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, 9729cf1351SSteve Wise [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, 9829cf1351SSteve Wise [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, 9929cf1351SSteve Wise [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 }, 1005b2cc79dSLeon Romanovsky [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, 1015b2cc79dSLeon Romanovsky [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, 1025b2cc79dSLeon Romanovsky .len = IFNAMSIZ }, 103da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, 104da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, 105da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, 
106da5c8507SSteve Wise .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN }, 107da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, 108da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, 109da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, 110da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, 111da5c8507SSteve Wise [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, 112517b773eSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, 113517b773eSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, 114517b773eSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, 115517b773eSLeon Romanovsky [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, 116c3d02788SLeon Romanovsky [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, 1173856ec4bSSteve Wise [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, 1183856ec4bSSteve Wise .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN }, 119cb7e0e13SParav Pandit [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, 1209e886b39SLeon Romanovsky [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, 1219e886b39SLeon Romanovsky .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN }, 1222e5b8a01SParav Pandit [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, 123b4c598a6SLeon Romanovsky }; 124b4c598a6SLeon Romanovsky 12573937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name, 12673937e8aSSteve Wise enum rdma_nldev_print_type print_type) 12773937e8aSSteve Wise { 12873937e8aSSteve Wise if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) 12973937e8aSSteve Wise return -EMSGSIZE; 13073937e8aSSteve Wise if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && 13173937e8aSSteve Wise nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) 13273937e8aSSteve Wise return -EMSGSIZE; 13373937e8aSSteve Wise 13473937e8aSSteve Wise return 0; 13573937e8aSSteve Wise } 13673937e8aSSteve Wise 13773937e8aSSteve Wise static int 
_rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, 13873937e8aSSteve Wise enum rdma_nldev_print_type print_type, 13973937e8aSSteve Wise u32 value) 14073937e8aSSteve Wise { 14173937e8aSSteve Wise if (put_driver_name_print_type(msg, name, print_type)) 14273937e8aSSteve Wise return -EMSGSIZE; 14373937e8aSSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) 14473937e8aSSteve Wise return -EMSGSIZE; 14573937e8aSSteve Wise 14673937e8aSSteve Wise return 0; 14773937e8aSSteve Wise } 14873937e8aSSteve Wise 14973937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, 15073937e8aSSteve Wise enum rdma_nldev_print_type print_type, 15173937e8aSSteve Wise u64 value) 15273937e8aSSteve Wise { 15373937e8aSSteve Wise if (put_driver_name_print_type(msg, name, print_type)) 15473937e8aSSteve Wise return -EMSGSIZE; 15573937e8aSSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, 15673937e8aSSteve Wise RDMA_NLDEV_ATTR_PAD)) 15773937e8aSSteve Wise return -EMSGSIZE; 15873937e8aSSteve Wise 15973937e8aSSteve Wise return 0; 16073937e8aSSteve Wise } 16173937e8aSSteve Wise 16273937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) 16373937e8aSSteve Wise { 16473937e8aSSteve Wise return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 16573937e8aSSteve Wise value); 16673937e8aSSteve Wise } 16773937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32); 16873937e8aSSteve Wise 16973937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, 17073937e8aSSteve Wise u32 value) 17173937e8aSSteve Wise { 17273937e8aSSteve Wise return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 17373937e8aSSteve Wise value); 17473937e8aSSteve Wise } 17573937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); 17673937e8aSSteve Wise 17773937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) 
17873937e8aSSteve Wise { 17973937e8aSSteve Wise return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 18073937e8aSSteve Wise value); 18173937e8aSSteve Wise } 18273937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64); 18373937e8aSSteve Wise 18473937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) 18573937e8aSSteve Wise { 18673937e8aSSteve Wise return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 18773937e8aSSteve Wise value); 18873937e8aSSteve Wise } 18973937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); 19073937e8aSSteve Wise 191c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) 192b4c598a6SLeon Romanovsky { 193b4c598a6SLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) 194b4c598a6SLeon Romanovsky return -EMSGSIZE; 195896de009SJason Gunthorpe if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, 196896de009SJason Gunthorpe dev_name(&device->dev))) 197b4c598a6SLeon Romanovsky return -EMSGSIZE; 198c2409810SLeon Romanovsky 199c2409810SLeon Romanovsky return 0; 200c2409810SLeon Romanovsky } 201c2409810SLeon Romanovsky 202c2409810SLeon Romanovsky static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) 203c2409810SLeon Romanovsky { 204c2409810SLeon Romanovsky char fw[IB_FW_VERSION_NAME_MAX]; 2059e886b39SLeon Romanovsky int ret = 0; 2069e886b39SLeon Romanovsky u8 port; 207c2409810SLeon Romanovsky 208c2409810SLeon Romanovsky if (fill_nldev_handle(msg, device)) 209c2409810SLeon Romanovsky return -EMSGSIZE; 210c2409810SLeon Romanovsky 211b4c598a6SLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) 212b4c598a6SLeon Romanovsky return -EMSGSIZE; 213ac505253SLeon Romanovsky 214ac505253SLeon Romanovsky BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); 215ac505253SLeon Romanovsky if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 
21625a0ad85SSteve Wise device->attrs.device_cap_flags, 21725a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 218ac505253SLeon Romanovsky return -EMSGSIZE; 219ac505253SLeon Romanovsky 2208621a7e3SLeon Romanovsky ib_get_device_fw_str(device, fw); 2215b2cc79dSLeon Romanovsky /* Device without FW has strlen(fw) = 0 */ 2228621a7e3SLeon Romanovsky if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) 2238621a7e3SLeon Romanovsky return -EMSGSIZE; 2248621a7e3SLeon Romanovsky 2251aaff896SLeon Romanovsky if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, 22625a0ad85SSteve Wise be64_to_cpu(device->node_guid), 22725a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 2281aaff896SLeon Romanovsky return -EMSGSIZE; 2291aaff896SLeon Romanovsky if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, 23025a0ad85SSteve Wise be64_to_cpu(device->attrs.sys_image_guid), 23125a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 2321aaff896SLeon Romanovsky return -EMSGSIZE; 2331bb77b8cSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) 2341bb77b8cSLeon Romanovsky return -EMSGSIZE; 2359e886b39SLeon Romanovsky 2369e886b39SLeon Romanovsky /* 2379e886b39SLeon Romanovsky * Link type is determined on first port and mlx4 device 2389e886b39SLeon Romanovsky * which can potentially have two different link type for the same 2399e886b39SLeon Romanovsky * IB device is considered as better to be avoided in the future, 2409e886b39SLeon Romanovsky */ 2419e886b39SLeon Romanovsky port = rdma_start_port(device); 2429e886b39SLeon Romanovsky if (rdma_cap_opa_mad(device, port)) 2439e886b39SLeon Romanovsky ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); 2449e886b39SLeon Romanovsky else if (rdma_protocol_ib(device, port)) 2459e886b39SLeon Romanovsky ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); 2469e886b39SLeon Romanovsky else if (rdma_protocol_iwarp(device, port)) 2479e886b39SLeon Romanovsky ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); 
2489e886b39SLeon Romanovsky else if (rdma_protocol_roce(device, port)) 2499e886b39SLeon Romanovsky ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); 2509e886b39SLeon Romanovsky else if (rdma_protocol_usnic(device, port)) 2519e886b39SLeon Romanovsky ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, 2529e886b39SLeon Romanovsky "usnic"); 2539e886b39SLeon Romanovsky return ret; 254b4c598a6SLeon Romanovsky } 255b4c598a6SLeon Romanovsky 2567d02f605SLeon Romanovsky static int fill_port_info(struct sk_buff *msg, 2575b2cc79dSLeon Romanovsky struct ib_device *device, u32 port, 2585b2cc79dSLeon Romanovsky const struct net *net) 2597d02f605SLeon Romanovsky { 2605b2cc79dSLeon Romanovsky struct net_device *netdev = NULL; 261ac505253SLeon Romanovsky struct ib_port_attr attr; 262ac505253SLeon Romanovsky int ret; 2634fa2813dSMichael Guralnik u64 cap_flags = 0; 264ac505253SLeon Romanovsky 265c2409810SLeon Romanovsky if (fill_nldev_handle(msg, device)) 2667d02f605SLeon Romanovsky return -EMSGSIZE; 267c2409810SLeon Romanovsky 2687d02f605SLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) 2697d02f605SLeon Romanovsky return -EMSGSIZE; 270ac505253SLeon Romanovsky 271ac505253SLeon Romanovsky ret = ib_query_port(device, port, &attr); 272ac505253SLeon Romanovsky if (ret) 273ac505253SLeon Romanovsky return ret; 274ac505253SLeon Romanovsky 275dd8028f1SLeon Romanovsky if (rdma_protocol_ib(device, port)) { 2764fa2813dSMichael Guralnik BUILD_BUG_ON((sizeof(attr.port_cap_flags) + 2774fa2813dSMichael Guralnik sizeof(attr.port_cap_flags2)) > sizeof(u64)); 2784fa2813dSMichael Guralnik cap_flags = attr.port_cap_flags | 2794fa2813dSMichael Guralnik ((u64)attr.port_cap_flags2 << 32); 280ac505253SLeon Romanovsky if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 2814fa2813dSMichael Guralnik cap_flags, RDMA_NLDEV_ATTR_PAD)) 282ac505253SLeon Romanovsky return -EMSGSIZE; 283dd8028f1SLeon Romanovsky if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, 
28425a0ad85SSteve Wise attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) 28512026fbbSLeon Romanovsky return -EMSGSIZE; 28680a06dd3SLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) 28780a06dd3SLeon Romanovsky return -EMSGSIZE; 28880a06dd3SLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) 28980a06dd3SLeon Romanovsky return -EMSGSIZE; 29034840feaSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) 29134840feaSLeon Romanovsky return -EMSGSIZE; 29280a06dd3SLeon Romanovsky } 2935654e49dSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) 2945654e49dSLeon Romanovsky return -EMSGSIZE; 2955654e49dSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) 2965654e49dSLeon Romanovsky return -EMSGSIZE; 2975b2cc79dSLeon Romanovsky 298c2261dd7SJason Gunthorpe netdev = ib_device_get_netdev(device, port); 2995b2cc79dSLeon Romanovsky if (netdev && net_eq(dev_net(netdev), net)) { 3005b2cc79dSLeon Romanovsky ret = nla_put_u32(msg, 3015b2cc79dSLeon Romanovsky RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); 3025b2cc79dSLeon Romanovsky if (ret) 3035b2cc79dSLeon Romanovsky goto out; 3045b2cc79dSLeon Romanovsky ret = nla_put_string(msg, 3055b2cc79dSLeon Romanovsky RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); 3065b2cc79dSLeon Romanovsky } 3075b2cc79dSLeon Romanovsky 3085b2cc79dSLeon Romanovsky out: 3095b2cc79dSLeon Romanovsky if (netdev) 3105b2cc79dSLeon Romanovsky dev_put(netdev); 3115b2cc79dSLeon Romanovsky return ret; 3127d02f605SLeon Romanovsky } 3137d02f605SLeon Romanovsky 314bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg, 315bf3c5a93SLeon Romanovsky const char *name, u64 curr) 316bf3c5a93SLeon Romanovsky { 317bf3c5a93SLeon Romanovsky struct nlattr *entry_attr; 318bf3c5a93SLeon Romanovsky 319bf3c5a93SLeon Romanovsky entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); 320bf3c5a93SLeon Romanovsky if (!entry_attr) 321bf3c5a93SLeon Romanovsky 
return -EMSGSIZE; 322bf3c5a93SLeon Romanovsky 323bf3c5a93SLeon Romanovsky if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) 324bf3c5a93SLeon Romanovsky goto err; 32525a0ad85SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 32625a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 327bf3c5a93SLeon Romanovsky goto err; 328bf3c5a93SLeon Romanovsky 329bf3c5a93SLeon Romanovsky nla_nest_end(msg, entry_attr); 330bf3c5a93SLeon Romanovsky return 0; 331bf3c5a93SLeon Romanovsky 332bf3c5a93SLeon Romanovsky err: 333bf3c5a93SLeon Romanovsky nla_nest_cancel(msg, entry_attr); 334bf3c5a93SLeon Romanovsky return -EMSGSIZE; 335bf3c5a93SLeon Romanovsky } 336bf3c5a93SLeon Romanovsky 337bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device) 338bf3c5a93SLeon Romanovsky { 339bf3c5a93SLeon Romanovsky static const char * const names[RDMA_RESTRACK_MAX] = { 340bf3c5a93SLeon Romanovsky [RDMA_RESTRACK_PD] = "pd", 341bf3c5a93SLeon Romanovsky [RDMA_RESTRACK_CQ] = "cq", 342bf3c5a93SLeon Romanovsky [RDMA_RESTRACK_QP] = "qp", 34300313983SSteve Wise [RDMA_RESTRACK_CM_ID] = "cm_id", 344fccec5b8SSteve Wise [RDMA_RESTRACK_MR] = "mr", 345ffd321e4SLeon Romanovsky [RDMA_RESTRACK_CTX] = "ctx", 346bf3c5a93SLeon Romanovsky }; 347bf3c5a93SLeon Romanovsky 348bf3c5a93SLeon Romanovsky struct nlattr *table_attr; 349bf3c5a93SLeon Romanovsky int ret, i, curr; 350bf3c5a93SLeon Romanovsky 351bf3c5a93SLeon Romanovsky if (fill_nldev_handle(msg, device)) 352bf3c5a93SLeon Romanovsky return -EMSGSIZE; 353bf3c5a93SLeon Romanovsky 354bf3c5a93SLeon Romanovsky table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); 355bf3c5a93SLeon Romanovsky if (!table_attr) 356bf3c5a93SLeon Romanovsky return -EMSGSIZE; 357bf3c5a93SLeon Romanovsky 358bf3c5a93SLeon Romanovsky for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 359bf3c5a93SLeon Romanovsky if (!names[i]) 360bf3c5a93SLeon Romanovsky continue; 3610ad699c0SLeon Romanovsky curr = 
rdma_restrack_count(device, i, 3620ad699c0SLeon Romanovsky task_active_pid_ns(current)); 363bf3c5a93SLeon Romanovsky ret = fill_res_info_entry(msg, names[i], curr); 364bf3c5a93SLeon Romanovsky if (ret) 365bf3c5a93SLeon Romanovsky goto err; 366bf3c5a93SLeon Romanovsky } 367bf3c5a93SLeon Romanovsky 368bf3c5a93SLeon Romanovsky nla_nest_end(msg, table_attr); 369bf3c5a93SLeon Romanovsky return 0; 370bf3c5a93SLeon Romanovsky 371bf3c5a93SLeon Romanovsky err: 372bf3c5a93SLeon Romanovsky nla_nest_cancel(msg, table_attr); 373bf3c5a93SLeon Romanovsky return ret; 374bf3c5a93SLeon Romanovsky } 375bf3c5a93SLeon Romanovsky 37600313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg, 37700313983SSteve Wise struct rdma_restrack_entry *res) 37800313983SSteve Wise { 37900313983SSteve Wise /* 38000313983SSteve Wise * For user resources, user is should read /proc/PID/comm to get the 38100313983SSteve Wise * name of the task file. 38200313983SSteve Wise */ 38300313983SSteve Wise if (rdma_is_kernel_res(res)) { 38400313983SSteve Wise if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, 38500313983SSteve Wise res->kern_name)) 38600313983SSteve Wise return -EMSGSIZE; 38700313983SSteve Wise } else { 38800313983SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, 38900313983SSteve Wise task_pid_vnr(res->task))) 39000313983SSteve Wise return -EMSGSIZE; 39100313983SSteve Wise } 39200313983SSteve Wise return 0; 39300313983SSteve Wise } 39400313983SSteve Wise 39502da3750SLeon Romanovsky static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg, 39602da3750SLeon Romanovsky struct rdma_restrack_entry *res) 39702da3750SLeon Romanovsky { 39802da3750SLeon Romanovsky if (!dev->ops.fill_res_entry) 39902da3750SLeon Romanovsky return false; 40002da3750SLeon Romanovsky return dev->ops.fill_res_entry(msg, res); 40102da3750SLeon Romanovsky } 40202da3750SLeon Romanovsky 403659067b0SLeon Romanovsky static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, 
404d12ff624SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 405b5fa635aSLeon Romanovsky { 406d12ff624SSteve Wise struct ib_qp *qp = container_of(res, struct ib_qp, res); 40702da3750SLeon Romanovsky struct ib_device *dev = qp->device; 408b5fa635aSLeon Romanovsky struct ib_qp_init_attr qp_init_attr; 409b5fa635aSLeon Romanovsky struct ib_qp_attr qp_attr; 410b5fa635aSLeon Romanovsky int ret; 411b5fa635aSLeon Romanovsky 412b5fa635aSLeon Romanovsky ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); 413b5fa635aSLeon Romanovsky if (ret) 414b5fa635aSLeon Romanovsky return ret; 415b5fa635aSLeon Romanovsky 416b5fa635aSLeon Romanovsky if (port && port != qp_attr.port_num) 417c5dfe0eaSLeon Romanovsky return -EAGAIN; 418b5fa635aSLeon Romanovsky 419b5fa635aSLeon Romanovsky /* In create_qp() port is not set yet */ 420b5fa635aSLeon Romanovsky if (qp_attr.port_num && 421b5fa635aSLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num)) 422b5fa635aSLeon Romanovsky goto err; 423b5fa635aSLeon Romanovsky 424b5fa635aSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num)) 425b5fa635aSLeon Romanovsky goto err; 426b5fa635aSLeon Romanovsky if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { 427b5fa635aSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, 428b5fa635aSLeon Romanovsky qp_attr.dest_qp_num)) 429b5fa635aSLeon Romanovsky goto err; 430b5fa635aSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, 431b5fa635aSLeon Romanovsky qp_attr.rq_psn)) 432b5fa635aSLeon Romanovsky goto err; 433b5fa635aSLeon Romanovsky } 434b5fa635aSLeon Romanovsky 435b5fa635aSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) 436b5fa635aSLeon Romanovsky goto err; 437b5fa635aSLeon Romanovsky 438b5fa635aSLeon Romanovsky if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || 439b5fa635aSLeon Romanovsky qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { 440b5fa635aSLeon Romanovsky if 
(nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, 441b5fa635aSLeon Romanovsky qp_attr.path_mig_state)) 442b5fa635aSLeon Romanovsky goto err; 443b5fa635aSLeon Romanovsky } 444b5fa635aSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) 445b5fa635aSLeon Romanovsky goto err; 446b5fa635aSLeon Romanovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) 447b5fa635aSLeon Romanovsky goto err; 448b5fa635aSLeon Romanovsky 449c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 450c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) 451c3d02788SLeon Romanovsky goto err; 452c3d02788SLeon Romanovsky 45300313983SSteve Wise if (fill_res_name_pid(msg, res)) 454b5fa635aSLeon Romanovsky goto err; 45500313983SSteve Wise 45602da3750SLeon Romanovsky if (fill_res_entry(dev, msg, res)) 457da5c8507SSteve Wise goto err; 458da5c8507SSteve Wise 45900313983SSteve Wise return 0; 46000313983SSteve Wise 461c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 46200313983SSteve Wise } 46300313983SSteve Wise 464659067b0SLeon Romanovsky static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, 46500313983SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 46600313983SSteve Wise { 46700313983SSteve Wise struct rdma_id_private *id_priv = 46800313983SSteve Wise container_of(res, struct rdma_id_private, res); 46902da3750SLeon Romanovsky struct ib_device *dev = id_priv->id.device; 47000313983SSteve Wise struct rdma_cm_id *cm_id = &id_priv->id; 47100313983SSteve Wise 47200313983SSteve Wise if (port && port != cm_id->port_num) 47300313983SSteve Wise return 0; 47400313983SSteve Wise 47500313983SSteve Wise if (cm_id->port_num && 47600313983SSteve Wise nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) 47700313983SSteve Wise goto err; 47800313983SSteve Wise 47900313983SSteve Wise if (id_priv->qp_num) { 48000313983SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) 
48100313983SSteve Wise goto err; 48200313983SSteve Wise if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) 483b5fa635aSLeon Romanovsky goto err; 484b5fa635aSLeon Romanovsky } 485b5fa635aSLeon Romanovsky 48600313983SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) 48700313983SSteve Wise goto err; 48800313983SSteve Wise 48900313983SSteve Wise if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) 49000313983SSteve Wise goto err; 49100313983SSteve Wise 49200313983SSteve Wise if (cm_id->route.addr.src_addr.ss_family && 49300313983SSteve Wise nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, 49400313983SSteve Wise sizeof(cm_id->route.addr.src_addr), 49500313983SSteve Wise &cm_id->route.addr.src_addr)) 49600313983SSteve Wise goto err; 49700313983SSteve Wise if (cm_id->route.addr.dst_addr.ss_family && 49800313983SSteve Wise nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, 49900313983SSteve Wise sizeof(cm_id->route.addr.dst_addr), 50000313983SSteve Wise &cm_id->route.addr.dst_addr)) 50100313983SSteve Wise goto err; 50200313983SSteve Wise 503517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) 504517b773eSLeon Romanovsky goto err; 505517b773eSLeon Romanovsky 50600313983SSteve Wise if (fill_res_name_pid(msg, res)) 50700313983SSteve Wise goto err; 50800313983SSteve Wise 50902da3750SLeon Romanovsky if (fill_res_entry(dev, msg, res)) 510da5c8507SSteve Wise goto err; 511da5c8507SSteve Wise 512b5fa635aSLeon Romanovsky return 0; 513b5fa635aSLeon Romanovsky 514c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 515b5fa635aSLeon Romanovsky } 516b5fa635aSLeon Romanovsky 517659067b0SLeon Romanovsky static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, 518a34fc089SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 519a34fc089SSteve Wise { 520a34fc089SSteve Wise struct ib_cq *cq = container_of(res, struct ib_cq, res); 52102da3750SLeon Romanovsky struct ib_device *dev = cq->device; 522a34fc089SSteve Wise 
523a34fc089SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) 524a34fc089SSteve Wise goto err; 525a34fc089SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 52625a0ad85SSteve Wise atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) 527a34fc089SSteve Wise goto err; 528a34fc089SSteve Wise 529a34fc089SSteve Wise /* Poll context is only valid for kernel CQs */ 530a34fc089SSteve Wise if (rdma_is_kernel_res(res) && 531a34fc089SSteve Wise nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) 532a34fc089SSteve Wise goto err; 533a34fc089SSteve Wise 534517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) 535517b773eSLeon Romanovsky goto err; 536c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 537c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 538c3d02788SLeon Romanovsky cq->uobject->context->res.id)) 539c3d02788SLeon Romanovsky goto err; 540517b773eSLeon Romanovsky 541a34fc089SSteve Wise if (fill_res_name_pid(msg, res)) 542a34fc089SSteve Wise goto err; 543a34fc089SSteve Wise 54402da3750SLeon Romanovsky if (fill_res_entry(dev, msg, res)) 545da5c8507SSteve Wise goto err; 546da5c8507SSteve Wise 547a34fc089SSteve Wise return 0; 548a34fc089SSteve Wise 549c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 550a34fc089SSteve Wise } 551a34fc089SSteve Wise 552659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 553fccec5b8SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 554fccec5b8SSteve Wise { 555fccec5b8SSteve Wise struct ib_mr *mr = container_of(res, struct ib_mr, res); 55602da3750SLeon Romanovsky struct ib_device *dev = mr->pd->device; 557fccec5b8SSteve Wise 558659067b0SLeon Romanovsky if (has_cap_net_admin) { 559fccec5b8SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) 560fccec5b8SSteve Wise goto err; 561fccec5b8SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) 562fccec5b8SSteve Wise goto err; 
563fccec5b8SSteve Wise } 564fccec5b8SSteve Wise 56525a0ad85SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 56625a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 567fccec5b8SSteve Wise goto err; 568fccec5b8SSteve Wise 569517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 570517b773eSLeon Romanovsky goto err; 571517b773eSLeon Romanovsky 572c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 573c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) 574c3d02788SLeon Romanovsky goto err; 575c3d02788SLeon Romanovsky 576fccec5b8SSteve Wise if (fill_res_name_pid(msg, res)) 577fccec5b8SSteve Wise goto err; 578fccec5b8SSteve Wise 57902da3750SLeon Romanovsky if (fill_res_entry(dev, msg, res)) 580da5c8507SSteve Wise goto err; 581da5c8507SSteve Wise 582fccec5b8SSteve Wise return 0; 583fccec5b8SSteve Wise 584c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 585fccec5b8SSteve Wise } 586fccec5b8SSteve Wise 587659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, 58829cf1351SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 58929cf1351SSteve Wise { 59029cf1351SSteve Wise struct ib_pd *pd = container_of(res, struct ib_pd, res); 59102da3750SLeon Romanovsky struct ib_device *dev = pd->device; 59229cf1351SSteve Wise 593659067b0SLeon Romanovsky if (has_cap_net_admin) { 59429cf1351SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, 59529cf1351SSteve Wise pd->local_dma_lkey)) 59629cf1351SSteve Wise goto err; 59729cf1351SSteve Wise if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 59829cf1351SSteve Wise nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 59929cf1351SSteve Wise pd->unsafe_global_rkey)) 60029cf1351SSteve Wise goto err; 60129cf1351SSteve Wise } 60229cf1351SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 60325a0ad85SSteve Wise atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 60429cf1351SSteve Wise goto err; 
60529cf1351SSteve Wise 606517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) 607517b773eSLeon Romanovsky goto err; 608517b773eSLeon Romanovsky 609c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 610c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 611c3d02788SLeon Romanovsky pd->uobject->context->res.id)) 612c3d02788SLeon Romanovsky goto err; 613c3d02788SLeon Romanovsky 61429cf1351SSteve Wise if (fill_res_name_pid(msg, res)) 61529cf1351SSteve Wise goto err; 61629cf1351SSteve Wise 61702da3750SLeon Romanovsky if (fill_res_entry(dev, msg, res)) 618da5c8507SSteve Wise goto err; 619da5c8507SSteve Wise 62029cf1351SSteve Wise return 0; 62129cf1351SSteve Wise 622c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 62329cf1351SSteve Wise } 62429cf1351SSteve Wise 625e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 626e5c9469eSLeon Romanovsky struct netlink_ext_ack *extack) 627e5c9469eSLeon Romanovsky { 628e5c9469eSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 629e5c9469eSLeon Romanovsky struct ib_device *device; 630e5c9469eSLeon Romanovsky struct sk_buff *msg; 631e5c9469eSLeon Romanovsky u32 index; 632e5c9469eSLeon Romanovsky int err; 633e5c9469eSLeon Romanovsky 634e5c9469eSLeon Romanovsky err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 635e5c9469eSLeon Romanovsky nldev_policy, extack); 636e5c9469eSLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 637e5c9469eSLeon Romanovsky return -EINVAL; 638e5c9469eSLeon Romanovsky 639e5c9469eSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 640e5c9469eSLeon Romanovsky 64137eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 642e5c9469eSLeon Romanovsky if (!device) 643e5c9469eSLeon Romanovsky return -EINVAL; 644e5c9469eSLeon Romanovsky 645e5c9469eSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 646f8978bd9SLeon Romanovsky if (!msg) { 647f8978bd9SLeon 
Romanovsky err = -ENOMEM; 648f8978bd9SLeon Romanovsky goto err; 649f8978bd9SLeon Romanovsky } 650e5c9469eSLeon Romanovsky 651e5c9469eSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 652e5c9469eSLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 653e5c9469eSLeon Romanovsky 0, 0); 654e5c9469eSLeon Romanovsky 655e5c9469eSLeon Romanovsky err = fill_dev_info(msg, device); 656f8978bd9SLeon Romanovsky if (err) 657f8978bd9SLeon Romanovsky goto err_free; 658e5c9469eSLeon Romanovsky 659e5c9469eSLeon Romanovsky nlmsg_end(msg, nlh); 660e5c9469eSLeon Romanovsky 66101b67117SParav Pandit ib_device_put(device); 662e5c9469eSLeon Romanovsky return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 663f8978bd9SLeon Romanovsky 664f8978bd9SLeon Romanovsky err_free: 665f8978bd9SLeon Romanovsky nlmsg_free(msg); 666f8978bd9SLeon Romanovsky err: 66701b67117SParav Pandit ib_device_put(device); 668f8978bd9SLeon Romanovsky return err; 669e5c9469eSLeon Romanovsky } 670e5c9469eSLeon Romanovsky 67105d940d3SLeon Romanovsky static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 67205d940d3SLeon Romanovsky struct netlink_ext_ack *extack) 67305d940d3SLeon Romanovsky { 67405d940d3SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 67505d940d3SLeon Romanovsky struct ib_device *device; 67605d940d3SLeon Romanovsky u32 index; 67705d940d3SLeon Romanovsky int err; 67805d940d3SLeon Romanovsky 67905d940d3SLeon Romanovsky err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 68005d940d3SLeon Romanovsky extack); 68105d940d3SLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 68205d940d3SLeon Romanovsky return -EINVAL; 68305d940d3SLeon Romanovsky 68405d940d3SLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 68537eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 68605d940d3SLeon Romanovsky if (!device) 68705d940d3SLeon Romanovsky return -EINVAL; 68805d940d3SLeon Romanovsky 
68905d940d3SLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { 69005d940d3SLeon Romanovsky char name[IB_DEVICE_NAME_MAX] = {}; 69105d940d3SLeon Romanovsky 69205d940d3SLeon Romanovsky nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 69305d940d3SLeon Romanovsky IB_DEVICE_NAME_MAX); 69405d940d3SLeon Romanovsky err = ib_device_rename(device, name); 6952e5b8a01SParav Pandit goto done; 69605d940d3SLeon Romanovsky } 69705d940d3SLeon Romanovsky 6982e5b8a01SParav Pandit if (tb[RDMA_NLDEV_NET_NS_FD]) { 6992e5b8a01SParav Pandit u32 ns_fd; 7002e5b8a01SParav Pandit 7012e5b8a01SParav Pandit ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); 7022e5b8a01SParav Pandit err = ib_device_set_netns_put(skb, device, ns_fd); 7032e5b8a01SParav Pandit goto put_done; 7042e5b8a01SParav Pandit } 7052e5b8a01SParav Pandit 7062e5b8a01SParav Pandit done: 70701b67117SParav Pandit ib_device_put(device); 7082e5b8a01SParav Pandit put_done: 70905d940d3SLeon Romanovsky return err; 71005d940d3SLeon Romanovsky } 71105d940d3SLeon Romanovsky 712b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device, 713b4c598a6SLeon Romanovsky struct sk_buff *skb, 714b4c598a6SLeon Romanovsky struct netlink_callback *cb, 715b4c598a6SLeon Romanovsky unsigned int idx) 716b4c598a6SLeon Romanovsky { 717b4c598a6SLeon Romanovsky int start = cb->args[0]; 718b4c598a6SLeon Romanovsky struct nlmsghdr *nlh; 719b4c598a6SLeon Romanovsky 720b4c598a6SLeon Romanovsky if (idx < start) 721b4c598a6SLeon Romanovsky return 0; 722b4c598a6SLeon Romanovsky 723b4c598a6SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 724b4c598a6SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 725b4c598a6SLeon Romanovsky 0, NLM_F_MULTI); 726b4c598a6SLeon Romanovsky 727b4c598a6SLeon Romanovsky if (fill_dev_info(skb, device)) { 728b4c598a6SLeon Romanovsky nlmsg_cancel(skb, nlh); 729b4c598a6SLeon Romanovsky goto out; 730b4c598a6SLeon Romanovsky } 731b4c598a6SLeon Romanovsky 732b4c598a6SLeon 
Romanovsky nlmsg_end(skb, nlh); 733b4c598a6SLeon Romanovsky 734b4c598a6SLeon Romanovsky idx++; 735b4c598a6SLeon Romanovsky 736b4c598a6SLeon Romanovsky out: cb->args[0] = idx; 737b4c598a6SLeon Romanovsky return skb->len; 738b4c598a6SLeon Romanovsky } 739b4c598a6SLeon Romanovsky 740b4c598a6SLeon Romanovsky static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 741b4c598a6SLeon Romanovsky { 742b4c598a6SLeon Romanovsky /* 743b4c598a6SLeon Romanovsky * There is no need to take lock, because 74437eeab55SParav Pandit * we are relying on ib_core's locking. 745b4c598a6SLeon Romanovsky */ 746b4c598a6SLeon Romanovsky return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); 747b4c598a6SLeon Romanovsky } 748b4c598a6SLeon Romanovsky 749c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 750c3f66f7bSLeon Romanovsky struct netlink_ext_ack *extack) 751c3f66f7bSLeon Romanovsky { 752c3f66f7bSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 753c3f66f7bSLeon Romanovsky struct ib_device *device; 754c3f66f7bSLeon Romanovsky struct sk_buff *msg; 755c3f66f7bSLeon Romanovsky u32 index; 756c3f66f7bSLeon Romanovsky u32 port; 757c3f66f7bSLeon Romanovsky int err; 758c3f66f7bSLeon Romanovsky 759c3f66f7bSLeon Romanovsky err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 760c3f66f7bSLeon Romanovsky nldev_policy, extack); 761287683d0SLeon Romanovsky if (err || 762287683d0SLeon Romanovsky !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 763287683d0SLeon Romanovsky !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 764c3f66f7bSLeon Romanovsky return -EINVAL; 765c3f66f7bSLeon Romanovsky 766c3f66f7bSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 76737eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 768c3f66f7bSLeon Romanovsky if (!device) 769c3f66f7bSLeon Romanovsky return -EINVAL; 770c3f66f7bSLeon Romanovsky 771c3f66f7bSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 772f8978bd9SLeon 
Romanovsky if (!rdma_is_port_valid(device, port)) { 773f8978bd9SLeon Romanovsky err = -EINVAL; 774f8978bd9SLeon Romanovsky goto err; 775f8978bd9SLeon Romanovsky } 776c3f66f7bSLeon Romanovsky 777c3f66f7bSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 778f8978bd9SLeon Romanovsky if (!msg) { 779f8978bd9SLeon Romanovsky err = -ENOMEM; 780f8978bd9SLeon Romanovsky goto err; 781f8978bd9SLeon Romanovsky } 782c3f66f7bSLeon Romanovsky 783c3f66f7bSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 784c3f66f7bSLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 785c3f66f7bSLeon Romanovsky 0, 0); 786c3f66f7bSLeon Romanovsky 7875b2cc79dSLeon Romanovsky err = fill_port_info(msg, device, port, sock_net(skb->sk)); 788f8978bd9SLeon Romanovsky if (err) 789f8978bd9SLeon Romanovsky goto err_free; 790c3f66f7bSLeon Romanovsky 791c3f66f7bSLeon Romanovsky nlmsg_end(msg, nlh); 79201b67117SParav Pandit ib_device_put(device); 793c3f66f7bSLeon Romanovsky 794c3f66f7bSLeon Romanovsky return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 795f8978bd9SLeon Romanovsky 796f8978bd9SLeon Romanovsky err_free: 797f8978bd9SLeon Romanovsky nlmsg_free(msg); 798f8978bd9SLeon Romanovsky err: 79901b67117SParav Pandit ib_device_put(device); 800f8978bd9SLeon Romanovsky return err; 801c3f66f7bSLeon Romanovsky } 802c3f66f7bSLeon Romanovsky 8037d02f605SLeon Romanovsky static int nldev_port_get_dumpit(struct sk_buff *skb, 8047d02f605SLeon Romanovsky struct netlink_callback *cb) 8057d02f605SLeon Romanovsky { 8067d02f605SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 8077d02f605SLeon Romanovsky struct ib_device *device; 8087d02f605SLeon Romanovsky int start = cb->args[0]; 8097d02f605SLeon Romanovsky struct nlmsghdr *nlh; 8107d02f605SLeon Romanovsky u32 idx = 0; 8117d02f605SLeon Romanovsky u32 ifindex; 8127d02f605SLeon Romanovsky int err; 813ea1075edSJason Gunthorpe unsigned int p; 8147d02f605SLeon Romanovsky 8157d02f605SLeon Romanovsky err = 
nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 8167d02f605SLeon Romanovsky nldev_policy, NULL); 8177d02f605SLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 8187d02f605SLeon Romanovsky return -EINVAL; 8197d02f605SLeon Romanovsky 8207d02f605SLeon Romanovsky ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 82137eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), ifindex); 8227d02f605SLeon Romanovsky if (!device) 8237d02f605SLeon Romanovsky return -EINVAL; 8247d02f605SLeon Romanovsky 825ea1075edSJason Gunthorpe rdma_for_each_port (device, p) { 8267d02f605SLeon Romanovsky /* 8277d02f605SLeon Romanovsky * The dumpit function returns all information from specific 8287d02f605SLeon Romanovsky * index. This specific index is taken from the netlink 8297d02f605SLeon Romanovsky * messages request sent by user and it is available 8307d02f605SLeon Romanovsky * in cb->args[0]. 8317d02f605SLeon Romanovsky * 8327d02f605SLeon Romanovsky * Usually, the user doesn't fill this field and it causes 8337d02f605SLeon Romanovsky * to return everything. 
8347d02f605SLeon Romanovsky * 8357d02f605SLeon Romanovsky */ 8367d02f605SLeon Romanovsky if (idx < start) { 8377d02f605SLeon Romanovsky idx++; 8387d02f605SLeon Romanovsky continue; 8397d02f605SLeon Romanovsky } 8407d02f605SLeon Romanovsky 8417d02f605SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, 8427d02f605SLeon Romanovsky cb->nlh->nlmsg_seq, 8437d02f605SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 8447d02f605SLeon Romanovsky RDMA_NLDEV_CMD_PORT_GET), 8457d02f605SLeon Romanovsky 0, NLM_F_MULTI); 8467d02f605SLeon Romanovsky 8475b2cc79dSLeon Romanovsky if (fill_port_info(skb, device, p, sock_net(skb->sk))) { 8487d02f605SLeon Romanovsky nlmsg_cancel(skb, nlh); 8497d02f605SLeon Romanovsky goto out; 8507d02f605SLeon Romanovsky } 8517d02f605SLeon Romanovsky idx++; 8527d02f605SLeon Romanovsky nlmsg_end(skb, nlh); 8537d02f605SLeon Romanovsky } 8547d02f605SLeon Romanovsky 855f8978bd9SLeon Romanovsky out: 85601b67117SParav Pandit ib_device_put(device); 857f8978bd9SLeon Romanovsky cb->args[0] = idx; 8587d02f605SLeon Romanovsky return skb->len; 8597d02f605SLeon Romanovsky } 8607d02f605SLeon Romanovsky 861bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 862bf3c5a93SLeon Romanovsky struct netlink_ext_ack *extack) 863bf3c5a93SLeon Romanovsky { 864bf3c5a93SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 865bf3c5a93SLeon Romanovsky struct ib_device *device; 866bf3c5a93SLeon Romanovsky struct sk_buff *msg; 867bf3c5a93SLeon Romanovsky u32 index; 868bf3c5a93SLeon Romanovsky int ret; 869bf3c5a93SLeon Romanovsky 870bf3c5a93SLeon Romanovsky ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 871bf3c5a93SLeon Romanovsky nldev_policy, extack); 872bf3c5a93SLeon Romanovsky if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 873bf3c5a93SLeon Romanovsky return -EINVAL; 874bf3c5a93SLeon Romanovsky 875bf3c5a93SLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 87637eeab55SParav Pandit device = 
ib_device_get_by_index(sock_net(skb->sk), index); 877bf3c5a93SLeon Romanovsky if (!device) 878bf3c5a93SLeon Romanovsky return -EINVAL; 879bf3c5a93SLeon Romanovsky 880bf3c5a93SLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 881f34727a1SDan Carpenter if (!msg) { 882f34727a1SDan Carpenter ret = -ENOMEM; 883bf3c5a93SLeon Romanovsky goto err; 884f34727a1SDan Carpenter } 885bf3c5a93SLeon Romanovsky 886bf3c5a93SLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 887bf3c5a93SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 888bf3c5a93SLeon Romanovsky 0, 0); 889bf3c5a93SLeon Romanovsky 890bf3c5a93SLeon Romanovsky ret = fill_res_info(msg, device); 891bf3c5a93SLeon Romanovsky if (ret) 892bf3c5a93SLeon Romanovsky goto err_free; 893bf3c5a93SLeon Romanovsky 894bf3c5a93SLeon Romanovsky nlmsg_end(msg, nlh); 89501b67117SParav Pandit ib_device_put(device); 896bf3c5a93SLeon Romanovsky return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 897bf3c5a93SLeon Romanovsky 898bf3c5a93SLeon Romanovsky err_free: 899bf3c5a93SLeon Romanovsky nlmsg_free(msg); 900bf3c5a93SLeon Romanovsky err: 90101b67117SParav Pandit ib_device_put(device); 902bf3c5a93SLeon Romanovsky return ret; 903bf3c5a93SLeon Romanovsky } 904bf3c5a93SLeon Romanovsky 905bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device, 906bf3c5a93SLeon Romanovsky struct sk_buff *skb, 907bf3c5a93SLeon Romanovsky struct netlink_callback *cb, 908bf3c5a93SLeon Romanovsky unsigned int idx) 909bf3c5a93SLeon Romanovsky { 910bf3c5a93SLeon Romanovsky int start = cb->args[0]; 911bf3c5a93SLeon Romanovsky struct nlmsghdr *nlh; 912bf3c5a93SLeon Romanovsky 913bf3c5a93SLeon Romanovsky if (idx < start) 914bf3c5a93SLeon Romanovsky return 0; 915bf3c5a93SLeon Romanovsky 916bf3c5a93SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 917bf3c5a93SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 918bf3c5a93SLeon 
Romanovsky 0, NLM_F_MULTI); 919bf3c5a93SLeon Romanovsky 920bf3c5a93SLeon Romanovsky if (fill_res_info(skb, device)) { 921bf3c5a93SLeon Romanovsky nlmsg_cancel(skb, nlh); 922bf3c5a93SLeon Romanovsky goto out; 923bf3c5a93SLeon Romanovsky } 924bf3c5a93SLeon Romanovsky nlmsg_end(skb, nlh); 925bf3c5a93SLeon Romanovsky 926bf3c5a93SLeon Romanovsky idx++; 927bf3c5a93SLeon Romanovsky 928bf3c5a93SLeon Romanovsky out: 929bf3c5a93SLeon Romanovsky cb->args[0] = idx; 930bf3c5a93SLeon Romanovsky return skb->len; 931bf3c5a93SLeon Romanovsky } 932bf3c5a93SLeon Romanovsky 933bf3c5a93SLeon Romanovsky static int nldev_res_get_dumpit(struct sk_buff *skb, 934bf3c5a93SLeon Romanovsky struct netlink_callback *cb) 935bf3c5a93SLeon Romanovsky { 936bf3c5a93SLeon Romanovsky return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); 937bf3c5a93SLeon Romanovsky } 938bf3c5a93SLeon Romanovsky 939d12ff624SSteve Wise struct nldev_fill_res_entry { 940659067b0SLeon Romanovsky int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin, 941d12ff624SSteve Wise struct rdma_restrack_entry *res, u32 port); 942d12ff624SSteve Wise enum rdma_nldev_attr nldev_attr; 943d12ff624SSteve Wise enum rdma_nldev_command nldev_cmd; 944c5dfe0eaSLeon Romanovsky u8 flags; 945c5dfe0eaSLeon Romanovsky u32 entry; 946c5dfe0eaSLeon Romanovsky u32 id; 947c5dfe0eaSLeon Romanovsky }; 948c5dfe0eaSLeon Romanovsky 949c5dfe0eaSLeon Romanovsky enum nldev_res_flags { 950c5dfe0eaSLeon Romanovsky NLDEV_PER_DEV = 1 << 0, 951d12ff624SSteve Wise }; 952d12ff624SSteve Wise 953d12ff624SSteve Wise static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { 954d12ff624SSteve Wise [RDMA_RESTRACK_QP] = { 955d12ff624SSteve Wise .fill_res_func = fill_res_qp_entry, 956d12ff624SSteve Wise .nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET, 957d12ff624SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, 958c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, 9591b8b7788SLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_LQPN, 960d12ff624SSteve 
Wise }, 96100313983SSteve Wise [RDMA_RESTRACK_CM_ID] = { 96200313983SSteve Wise .fill_res_func = fill_res_cm_id_entry, 96300313983SSteve Wise .nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET, 96400313983SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, 965c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, 966517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_CM_IDN, 96700313983SSteve Wise }, 968a34fc089SSteve Wise [RDMA_RESTRACK_CQ] = { 969a34fc089SSteve Wise .fill_res_func = fill_res_cq_entry, 970a34fc089SSteve Wise .nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET, 971a34fc089SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, 972c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 973c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, 974517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_CQN, 975a34fc089SSteve Wise }, 976fccec5b8SSteve Wise [RDMA_RESTRACK_MR] = { 977fccec5b8SSteve Wise .fill_res_func = fill_res_mr_entry, 978fccec5b8SSteve Wise .nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET, 979fccec5b8SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, 980c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 981c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, 982517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_MRN, 983fccec5b8SSteve Wise }, 98429cf1351SSteve Wise [RDMA_RESTRACK_PD] = { 98529cf1351SSteve Wise .fill_res_func = fill_res_pd_entry, 98629cf1351SSteve Wise .nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET, 98729cf1351SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, 988c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 989c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, 990517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_PDN, 99129cf1351SSteve Wise }, 992d12ff624SSteve Wise }; 993d12ff624SSteve Wise 9948be565e6SLeon Romanovsky static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res) 9958be565e6SLeon Romanovsky { 9968be565e6SLeon Romanovsky /* 9978be565e6SLeon Romanovsky * 1. 
Kern resources should be visible in init name space only 9988be565e6SLeon Romanovsky * 2. Present only resources visible in the current namespace 9998be565e6SLeon Romanovsky */ 10008be565e6SLeon Romanovsky if (rdma_is_kernel_res(res)) 10018be565e6SLeon Romanovsky return task_active_pid_ns(current) == &init_pid_ns; 10028be565e6SLeon Romanovsky return task_active_pid_ns(current) == task_active_pid_ns(res->task); 10038be565e6SLeon Romanovsky } 10048be565e6SLeon Romanovsky 1005c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1006c5dfe0eaSLeon Romanovsky struct netlink_ext_ack *extack, 1007c5dfe0eaSLeon Romanovsky enum rdma_restrack_type res_type) 1008c5dfe0eaSLeon Romanovsky { 1009c5dfe0eaSLeon Romanovsky const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1010c5dfe0eaSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1011c5dfe0eaSLeon Romanovsky struct rdma_restrack_entry *res; 1012c5dfe0eaSLeon Romanovsky struct ib_device *device; 1013c5dfe0eaSLeon Romanovsky u32 index, id, port = 0; 1014c5dfe0eaSLeon Romanovsky bool has_cap_net_admin; 1015c5dfe0eaSLeon Romanovsky struct sk_buff *msg; 1016c5dfe0eaSLeon Romanovsky int ret; 1017c5dfe0eaSLeon Romanovsky 1018c5dfe0eaSLeon Romanovsky ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1019c5dfe0eaSLeon Romanovsky nldev_policy, extack); 1020c5dfe0eaSLeon Romanovsky if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) 1021c5dfe0eaSLeon Romanovsky return -EINVAL; 1022c5dfe0eaSLeon Romanovsky 1023c5dfe0eaSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 102437eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1025c5dfe0eaSLeon Romanovsky if (!device) 1026c5dfe0eaSLeon Romanovsky return -EINVAL; 1027c5dfe0eaSLeon Romanovsky 1028c5dfe0eaSLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1029c5dfe0eaSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1030c5dfe0eaSLeon 
Romanovsky if (!rdma_is_port_valid(device, port)) { 1031c5dfe0eaSLeon Romanovsky ret = -EINVAL; 1032c5dfe0eaSLeon Romanovsky goto err; 1033c5dfe0eaSLeon Romanovsky } 1034c5dfe0eaSLeon Romanovsky } 1035c5dfe0eaSLeon Romanovsky 1036c5dfe0eaSLeon Romanovsky if ((port && fe->flags & NLDEV_PER_DEV) || 1037c5dfe0eaSLeon Romanovsky (!port && ~fe->flags & NLDEV_PER_DEV)) { 1038c5dfe0eaSLeon Romanovsky ret = -EINVAL; 1039c5dfe0eaSLeon Romanovsky goto err; 1040c5dfe0eaSLeon Romanovsky } 1041c5dfe0eaSLeon Romanovsky 1042c5dfe0eaSLeon Romanovsky id = nla_get_u32(tb[fe->id]); 1043c5dfe0eaSLeon Romanovsky res = rdma_restrack_get_byid(device, res_type, id); 1044c5dfe0eaSLeon Romanovsky if (IS_ERR(res)) { 1045c5dfe0eaSLeon Romanovsky ret = PTR_ERR(res); 1046c5dfe0eaSLeon Romanovsky goto err; 1047c5dfe0eaSLeon Romanovsky } 1048c5dfe0eaSLeon Romanovsky 1049c5dfe0eaSLeon Romanovsky if (!is_visible_in_pid_ns(res)) { 1050c5dfe0eaSLeon Romanovsky ret = -ENOENT; 1051c5dfe0eaSLeon Romanovsky goto err_get; 1052c5dfe0eaSLeon Romanovsky } 1053c5dfe0eaSLeon Romanovsky 1054c5dfe0eaSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1055c5dfe0eaSLeon Romanovsky if (!msg) { 1056c5dfe0eaSLeon Romanovsky ret = -ENOMEM; 1057c5dfe0eaSLeon Romanovsky goto err; 1058c5dfe0eaSLeon Romanovsky } 1059c5dfe0eaSLeon Romanovsky 1060c5dfe0eaSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1061c5dfe0eaSLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), 1062c5dfe0eaSLeon Romanovsky 0, 0); 1063c5dfe0eaSLeon Romanovsky 1064c5dfe0eaSLeon Romanovsky if (fill_nldev_handle(msg, device)) { 1065c5dfe0eaSLeon Romanovsky ret = -EMSGSIZE; 1066c5dfe0eaSLeon Romanovsky goto err_free; 1067c5dfe0eaSLeon Romanovsky } 1068c5dfe0eaSLeon Romanovsky 1069c5dfe0eaSLeon Romanovsky has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); 1070c5dfe0eaSLeon Romanovsky ret = fe->fill_res_func(msg, has_cap_net_admin, res, port); 1071c5dfe0eaSLeon Romanovsky rdma_restrack_put(res); 
1072c5dfe0eaSLeon Romanovsky if (ret) 1073c5dfe0eaSLeon Romanovsky goto err_free; 1074c5dfe0eaSLeon Romanovsky 1075c5dfe0eaSLeon Romanovsky nlmsg_end(msg, nlh); 1076c5dfe0eaSLeon Romanovsky ib_device_put(device); 1077c5dfe0eaSLeon Romanovsky return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 1078c5dfe0eaSLeon Romanovsky 1079c5dfe0eaSLeon Romanovsky err_free: 1080c5dfe0eaSLeon Romanovsky nlmsg_free(msg); 1081c5dfe0eaSLeon Romanovsky err_get: 1082c5dfe0eaSLeon Romanovsky rdma_restrack_put(res); 1083c5dfe0eaSLeon Romanovsky err: 1084c5dfe0eaSLeon Romanovsky ib_device_put(device); 1085c5dfe0eaSLeon Romanovsky return ret; 1086c5dfe0eaSLeon Romanovsky } 1087c5dfe0eaSLeon Romanovsky 1088d12ff624SSteve Wise static int res_get_common_dumpit(struct sk_buff *skb, 1089d12ff624SSteve Wise struct netlink_callback *cb, 1090d12ff624SSteve Wise enum rdma_restrack_type res_type) 1091b5fa635aSLeon Romanovsky { 1092d12ff624SSteve Wise const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1093b5fa635aSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1094b5fa635aSLeon Romanovsky struct rdma_restrack_entry *res; 10957c77c6a9SLeon Romanovsky struct rdma_restrack_root *rt; 1096b5fa635aSLeon Romanovsky int err, ret = 0, idx = 0; 1097b5fa635aSLeon Romanovsky struct nlattr *table_attr; 1098c5dfe0eaSLeon Romanovsky struct nlattr *entry_attr; 1099b5fa635aSLeon Romanovsky struct ib_device *device; 1100b5fa635aSLeon Romanovsky int start = cb->args[0]; 1101659067b0SLeon Romanovsky bool has_cap_net_admin; 1102b5fa635aSLeon Romanovsky struct nlmsghdr *nlh; 1103fd47c2f9SLeon Romanovsky unsigned long id; 1104b5fa635aSLeon Romanovsky u32 index, port = 0; 1105d12ff624SSteve Wise bool filled = false; 1106b5fa635aSLeon Romanovsky 1107b5fa635aSLeon Romanovsky err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1108b5fa635aSLeon Romanovsky nldev_policy, NULL); 1109b5fa635aSLeon Romanovsky /* 1110d12ff624SSteve Wise * Right now, we are expecting the device index to get res 
information, 1111b5fa635aSLeon Romanovsky * but it is possible to extend this code to return all devices in 1112b5fa635aSLeon Romanovsky * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. 1113b5fa635aSLeon Romanovsky * if it doesn't exist, we will iterate over all devices. 1114b5fa635aSLeon Romanovsky * 1115b5fa635aSLeon Romanovsky * But it is not needed for now. 1116b5fa635aSLeon Romanovsky */ 1117b5fa635aSLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1118b5fa635aSLeon Romanovsky return -EINVAL; 1119b5fa635aSLeon Romanovsky 1120b5fa635aSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 112137eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1122b5fa635aSLeon Romanovsky if (!device) 1123b5fa635aSLeon Romanovsky return -EINVAL; 1124b5fa635aSLeon Romanovsky 1125b5fa635aSLeon Romanovsky /* 1126b5fa635aSLeon Romanovsky * If no PORT_INDEX is supplied, we will return all QPs from that device 1127b5fa635aSLeon Romanovsky */ 1128b5fa635aSLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1129b5fa635aSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1130b5fa635aSLeon Romanovsky if (!rdma_is_port_valid(device, port)) { 1131b5fa635aSLeon Romanovsky ret = -EINVAL; 1132b5fa635aSLeon Romanovsky goto err_index; 1133b5fa635aSLeon Romanovsky } 1134b5fa635aSLeon Romanovsky } 1135b5fa635aSLeon Romanovsky 1136b5fa635aSLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1137d12ff624SSteve Wise RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), 1138b5fa635aSLeon Romanovsky 0, NLM_F_MULTI); 1139b5fa635aSLeon Romanovsky 1140b5fa635aSLeon Romanovsky if (fill_nldev_handle(skb, device)) { 1141b5fa635aSLeon Romanovsky ret = -EMSGSIZE; 1142b5fa635aSLeon Romanovsky goto err; 1143b5fa635aSLeon Romanovsky } 1144b5fa635aSLeon Romanovsky 1145d12ff624SSteve Wise table_attr = nla_nest_start(skb, fe->nldev_attr); 1146b5fa635aSLeon Romanovsky if (!table_attr) { 
1147b5fa635aSLeon Romanovsky ret = -EMSGSIZE; 1148b5fa635aSLeon Romanovsky goto err; 1149b5fa635aSLeon Romanovsky } 1150b5fa635aSLeon Romanovsky 1151659067b0SLeon Romanovsky has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); 1152659067b0SLeon Romanovsky 11537c77c6a9SLeon Romanovsky rt = &device->res[res_type]; 11547c77c6a9SLeon Romanovsky xa_lock(&rt->xa); 1155fd47c2f9SLeon Romanovsky /* 1156fd47c2f9SLeon Romanovsky * FIXME: if the skip ahead is something common this loop should 1157fd47c2f9SLeon Romanovsky * use xas_for_each & xas_pause to optimize, we can have a lot of 1158fd47c2f9SLeon Romanovsky * objects. 1159fd47c2f9SLeon Romanovsky */ 11607c77c6a9SLeon Romanovsky xa_for_each(&rt->xa, id, res) { 11618be565e6SLeon Romanovsky if (!is_visible_in_pid_ns(res)) 1162f2a0e45fSLeon Romanovsky continue; 1163b5fa635aSLeon Romanovsky 1164f2a0e45fSLeon Romanovsky if (idx < start || !rdma_restrack_get(res)) 1165b5fa635aSLeon Romanovsky goto next; 1166b5fa635aSLeon Romanovsky 11677c77c6a9SLeon Romanovsky xa_unlock(&rt->xa); 11687c77c6a9SLeon Romanovsky 1169d12ff624SSteve Wise filled = true; 1170b5fa635aSLeon Romanovsky 1171c5dfe0eaSLeon Romanovsky entry_attr = nla_nest_start(skb, fe->entry); 1172c5dfe0eaSLeon Romanovsky if (!entry_attr) { 1173c5dfe0eaSLeon Romanovsky ret = -EMSGSIZE; 1174c5dfe0eaSLeon Romanovsky rdma_restrack_put(res); 11757c77c6a9SLeon Romanovsky goto msg_full; 1176c5dfe0eaSLeon Romanovsky } 1177c5dfe0eaSLeon Romanovsky 1178659067b0SLeon Romanovsky ret = fe->fill_res_func(skb, has_cap_net_admin, res, port); 1179b5fa635aSLeon Romanovsky rdma_restrack_put(res); 1180b5fa635aSLeon Romanovsky 11817c77c6a9SLeon Romanovsky if (ret) { 1182c5dfe0eaSLeon Romanovsky nla_nest_cancel(skb, entry_attr); 1183b5fa635aSLeon Romanovsky if (ret == -EMSGSIZE) 11847c77c6a9SLeon Romanovsky goto msg_full; 1185c5dfe0eaSLeon Romanovsky if (ret == -EAGAIN) 11867c77c6a9SLeon Romanovsky goto again; 1187b5fa635aSLeon Romanovsky goto res_err; 11887c77c6a9SLeon Romanovsky } 
1189c5dfe0eaSLeon Romanovsky nla_nest_end(skb, entry_attr); 11907c77c6a9SLeon Romanovsky again: xa_lock(&rt->xa); 1191b5fa635aSLeon Romanovsky next: idx++; 1192b5fa635aSLeon Romanovsky } 11937c77c6a9SLeon Romanovsky xa_unlock(&rt->xa); 1194b5fa635aSLeon Romanovsky 11957c77c6a9SLeon Romanovsky msg_full: 1196b5fa635aSLeon Romanovsky nla_nest_end(skb, table_attr); 1197b5fa635aSLeon Romanovsky nlmsg_end(skb, nlh); 1198b5fa635aSLeon Romanovsky cb->args[0] = idx; 1199b5fa635aSLeon Romanovsky 1200b5fa635aSLeon Romanovsky /* 1201d12ff624SSteve Wise * No more entries to fill, cancel the message and 1202b5fa635aSLeon Romanovsky * return 0 to mark end of dumpit. 1203b5fa635aSLeon Romanovsky */ 1204d12ff624SSteve Wise if (!filled) 1205b5fa635aSLeon Romanovsky goto err; 1206b5fa635aSLeon Romanovsky 120701b67117SParav Pandit ib_device_put(device); 1208b5fa635aSLeon Romanovsky return skb->len; 1209b5fa635aSLeon Romanovsky 1210b5fa635aSLeon Romanovsky res_err: 1211b5fa635aSLeon Romanovsky nla_nest_cancel(skb, table_attr); 1212b5fa635aSLeon Romanovsky 1213b5fa635aSLeon Romanovsky err: 1214b5fa635aSLeon Romanovsky nlmsg_cancel(skb, nlh); 1215b5fa635aSLeon Romanovsky 1216b5fa635aSLeon Romanovsky err_index: 121701b67117SParav Pandit ib_device_put(device); 1218b5fa635aSLeon Romanovsky return ret; 1219b5fa635aSLeon Romanovsky } 1220b5fa635aSLeon Romanovsky 1221f732e713SLeon Romanovsky #define RES_GET_FUNCS(name, type) \ 1222f732e713SLeon Romanovsky static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ 1223f732e713SLeon Romanovsky struct netlink_callback *cb) \ 1224f732e713SLeon Romanovsky { \ 1225f732e713SLeon Romanovsky return res_get_common_dumpit(skb, cb, type); \ 1226c5dfe0eaSLeon Romanovsky } \ 1227c5dfe0eaSLeon Romanovsky static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ 1228c5dfe0eaSLeon Romanovsky struct nlmsghdr *nlh, \ 1229c5dfe0eaSLeon Romanovsky struct netlink_ext_ack *extack) \ 1230c5dfe0eaSLeon Romanovsky { \ 1231c5dfe0eaSLeon Romanovsky return 
res_get_common_doit(skb, nlh, extack, type); \ 1232d12ff624SSteve Wise } 1233d12ff624SSteve Wise 1234f732e713SLeon Romanovsky RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); 1235f732e713SLeon Romanovsky RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); 1236f732e713SLeon Romanovsky RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); 1237f732e713SLeon Romanovsky RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); 1238f732e713SLeon Romanovsky RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); 123929cf1351SSteve Wise 12403856ec4bSSteve Wise static LIST_HEAD(link_ops); 12413856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem); 12423856ec4bSSteve Wise 12433856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type) 12443856ec4bSSteve Wise { 12453856ec4bSSteve Wise const struct rdma_link_ops *ops; 12463856ec4bSSteve Wise 12473856ec4bSSteve Wise list_for_each_entry(ops, &link_ops, list) { 12483856ec4bSSteve Wise if (!strcmp(ops->type, type)) 12493856ec4bSSteve Wise goto out; 12503856ec4bSSteve Wise } 12513856ec4bSSteve Wise ops = NULL; 12523856ec4bSSteve Wise out: 12533856ec4bSSteve Wise return ops; 12543856ec4bSSteve Wise } 12553856ec4bSSteve Wise 12563856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops) 12573856ec4bSSteve Wise { 12583856ec4bSSteve Wise down_write(&link_ops_rwsem); 1259afc1990eSDan Carpenter if (WARN_ON_ONCE(link_ops_get(ops->type))) 12603856ec4bSSteve Wise goto out; 12613856ec4bSSteve Wise list_add(&ops->list, &link_ops); 12623856ec4bSSteve Wise out: 12633856ec4bSSteve Wise up_write(&link_ops_rwsem); 12643856ec4bSSteve Wise } 12653856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register); 12663856ec4bSSteve Wise 12673856ec4bSSteve Wise void rdma_link_unregister(struct rdma_link_ops *ops) 12683856ec4bSSteve Wise { 12693856ec4bSSteve Wise down_write(&link_ops_rwsem); 12703856ec4bSSteve Wise list_del(&ops->list); 12713856ec4bSSteve Wise up_write(&link_ops_rwsem); 12723856ec4bSSteve Wise } 12733856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_unregister); 12743856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_NEWLINK handler: create a software RDMA device named
 * RDMA_NLDEV_ATTR_DEV_NAME on top of the netdev named by
 * RDMA_NLDEV_ATTR_NDEV_NAME, using the link provider selected by
 * RDMA_NLDEV_ATTR_LINK_TYPE.
 *
 * Returns 0 on success, -EINVAL on bad attributes or unknown link type,
 * -ENODEV when the netdev does not exist, or the provider's error code.
 */
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	/* All three attributes are mandatory for NEWLINK. */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	/* Reject '%' in the requested name (kernel name-template character). */
	if (strchr(ibdev_name, '%'))
		return -EINVAL;

	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	/* Netdev lookup is done in init_net only; takes a reference. */
	ndev = dev_get_by_name(&init_net, ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	if (!ops) {
		/*
		 * Drop the rwsem before request_module(): presumably the
		 * loaded module calls rdma_link_register(), which takes
		 * this rwsem for write, so holding it here would deadlock.
		 */
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}

/*
 * RDMA_NLDEV_CMD_DELLINK handler: unregister the device identified by
 * RDMA_NLDEV_ATTR_DEV_INDEX, if the driver allows user-triggered
 * unregistration.
 */
static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Lookup is scoped to the requester's network namespace. */
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* Only drivers that opted in via IB_DEVICE_ALLOW_USER_UNREG. */
	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
		ib_device_put(device);
		return -EINVAL;
	}

	ib_unregister_device_and_put(device);
	return 0;
}

/*
 * RDMA_NLDEV_CMD_SYS_GET dump handler: report subsystem-wide settings;
 * currently only the shared-netns mode flag (ib_devices_shared_netns).
 */
static int nldev_get_sys_get_dumpit(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct nlmsghdr *nlh;
	int err;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err)
		return err;

	/*
	 * NOTE(review): nlmsg_put() returns NULL when the skb has no room,
	 * and that result is not checked before nla_put_u8() below —
	 * presumably the freshly allocated dump skb always fits this one
	 * small message; confirm.
	 */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_SYS_GET),
			0, 0);

	err = nla_put_u8(skb, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
			 (u8)ib_devices_shared_netns);
	if (err) {
		/* Roll back the partially built message on failure. */
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return skb->len;
}

/*
 * RDMA_NLDEV_CMD_SYS_SET handler: switch the subsystem between shared
 * and exclusive network-namespace modes via rdma_compatdev_set().
 */
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	u8 enable;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
		return -EINVAL;

	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
	/* Only 0 and 1 are supported */
	if (enable > 1)
		return -EINVAL;

	err = rdma_compatdev_set(enable);
	return err;
}

/*
 * Dispatch table mapping each RDMA_NLDEV_CMD_* to its doit (request)
 * and/or dump (iteration) handler.  Entries flagged RDMA_NL_ADMIN_PERM
 * require CAP_NET_ADMIN.
 */
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	/* Resource-tracking queries (see RES_GET_FUNCS above). */
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.dump = nldev_get_sys_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

/* Register the nldev handler table with the RDMA netlink core. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);